2024-11-25 07:28:00,863 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-25 07:28:00,874 main DEBUG Took 0.009375 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-25 07:28:00,875 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-25 07:28:00,875 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-25 07:28:00,876 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-25 07:28:00,877 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,883 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-25 07:28:00,894 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,895 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,896 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,896 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,897 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,897 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,898 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,898 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,898 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,899 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,899 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,900 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,900 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,900 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-25 07:28:00,901 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,901 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,902 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,902 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,902 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,902 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,903 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,903 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,904 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,904 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 07:28:00,904 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,905 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-25 07:28:00,906 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 07:28:00,907 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-25 07:28:00,909 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-25 07:28:00,910 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-25 07:28:00,911 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-25 07:28:00,911 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-25 07:28:00,922 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-25 07:28:00,925 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-25 07:28:00,927 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-25 07:28:00,927 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-25 07:28:00,928 main DEBUG createAppenders(={Console}) 2024-11-25 07:28:00,929 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-25 07:28:00,929 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-25 07:28:00,930 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-25 07:28:00,930 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-25 07:28:00,931 main DEBUG OutputStream closed 2024-11-25 07:28:00,931 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-25 07:28:00,932 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-25 07:28:00,932 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-25 07:28:01,009 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-25 07:28:01,011 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-25 07:28:01,012 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-25 07:28:01,013 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-25 07:28:01,013 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-25 07:28:01,014 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-25 07:28:01,014 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-25 07:28:01,014 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-25 07:28:01,014 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-25 07:28:01,015 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-25 07:28:01,015 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-25 07:28:01,015 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-25 07:28:01,015 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-25 07:28:01,016 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-25 07:28:01,016 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-25 07:28:01,016 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-25 07:28:01,017 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-25 07:28:01,017 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-25 07:28:01,019 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-25 07:28:01,020 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-25 07:28:01,020 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-25 07:28:01,021 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-25T07:28:01,343 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79 2024-11-25 07:28:01,347 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-25 07:28:01,348 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-25T07:28:01,360 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-25T07:28:01,401 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=193, ProcessCount=11, AvailableMemoryMB=8509 2024-11-25T07:28:01,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T07:28:01,418 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b, deleteOnExit=true 2024-11-25T07:28:01,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T07:28:01,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/test.cache.data in system properties and HBase conf 2024-11-25T07:28:01,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T07:28:01,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/hadoop.log.dir in system properties and HBase conf 2024-11-25T07:28:01,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T07:28:01,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T07:28:01,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T07:28:01,522 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-25T07:28:01,609 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-25T07:28:01,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:28:01,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:28:01,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T07:28:01,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:28:01,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T07:28:01,615 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T07:28:01,615 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:28:01,616 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:28:01,616 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T07:28:01,617 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/nfs.dump.dir in system properties and HBase conf 2024-11-25T07:28:01,617 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/java.io.tmpdir in system properties and HBase conf 2024-11-25T07:28:01,617 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:28:01,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T07:28:01,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T07:28:02,133 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:28:02,513 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-25T07:28:02,612 INFO [Time-limited test {}] log.Log(170): Logging initialized @2458ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-25T07:28:02,701 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:28:02,774 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:28:02,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:28:02,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:28:02,801 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:28:02,815 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:28:02,817 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:28:02,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:28:03,054 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/java.io.tmpdir/jetty-localhost-34399-hadoop-hdfs-3_4_1-tests_jar-_-any-12364881954031971182/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:28:03,063 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:34399} 2024-11-25T07:28:03,063 INFO [Time-limited test {}] server.Server(415): Started @2910ms 2024-11-25T07:28:03,096 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:28:03,470 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:28:03,477 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:28:03,478 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:28:03,479 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:28:03,479 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:28:03,480 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:28:03,480 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:28:03,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ca8488f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/java.io.tmpdir/jetty-localhost-44187-hadoop-hdfs-3_4_1-tests_jar-_-any-13278263230250168364/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:28:03,604 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@dc1ca4f{HTTP/1.1, (http/1.1)}{localhost:44187} 2024-11-25T07:28:03,604 INFO [Time-limited test {}] server.Server(415): Started @3451ms 2024-11-25T07:28:03,667 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:28:03,814 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:28:03,821 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:28:03,822 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:28:03,822 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:28:03,823 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:28:03,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:28:03,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:28:03,994 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ca1952e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/java.io.tmpdir/jetty-localhost-46807-hadoop-hdfs-3_4_1-tests_jar-_-any-3976430312844521951/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:28:03,995 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75639b0e{HTTP/1.1, (http/1.1)}{localhost:46807} 2024-11-25T07:28:03,995 INFO [Time-limited test {}] server.Server(415): Started @3842ms 2024-11-25T07:28:03,998 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T07:28:04,194 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b/data/data1/current/BP-1306032755-172.17.0.2-1732519682243/current, will proceed with Du for space computation calculation, 2024-11-25T07:28:04,194 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b/data/data3/current/BP-1306032755-172.17.0.2-1732519682243/current, will proceed with Du for space computation calculation, 2024-11-25T07:28:04,194 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b/data/data2/current/BP-1306032755-172.17.0.2-1732519682243/current, will proceed with Du for space computation calculation, 2024-11-25T07:28:04,194 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b/data/data4/current/BP-1306032755-172.17.0.2-1732519682243/current, will proceed with Du for space computation calculation, 2024-11-25T07:28:04,266 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T07:28:04,267 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T07:28:04,343 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaec0676ccbc15d3e with lease ID 0x2b6a9a1af1c44e48: Processing first storage report for DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb from datanode DatanodeRegistration(127.0.0.1:42947, datanodeUuid=3be5ebb6-56e9-435b-a394-794943739344, infoPort=33541, infoSecurePort=0, ipcPort=35511, storageInfo=lv=-57;cid=testClusterID;nsid=1393981232;c=1732519682243) 2024-11-25T07:28:04,345 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaec0676ccbc15d3e with lease ID 0x2b6a9a1af1c44e48: from storage DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb node DatanodeRegistration(127.0.0.1:42947, datanodeUuid=3be5ebb6-56e9-435b-a394-794943739344, infoPort=33541, infoSecurePort=0, ipcPort=35511, storageInfo=lv=-57;cid=testClusterID;nsid=1393981232;c=1732519682243), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-25T07:28:04,345 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd2cca37bf9c682b with lease ID 0x2b6a9a1af1c44e49: Processing first storage report for DS-c50db246-0a69-4f33-af37-aa71eb481602 from datanode DatanodeRegistration(127.0.0.1:42077, datanodeUuid=7659ee3f-20c3-4e96-bf69-a8836b47a004, infoPort=32801, infoSecurePort=0, ipcPort=44745, storageInfo=lv=-57;cid=testClusterID;nsid=1393981232;c=1732519682243) 2024-11-25T07:28:04,345 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd2cca37bf9c682b with lease ID 0x2b6a9a1af1c44e49: from storage DS-c50db246-0a69-4f33-af37-aa71eb481602 node DatanodeRegistration(127.0.0.1:42077, datanodeUuid=7659ee3f-20c3-4e96-bf69-a8836b47a004, infoPort=32801, infoSecurePort=0, ipcPort=44745, storageInfo=lv=-57;cid=testClusterID;nsid=1393981232;c=1732519682243), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:28:04,346 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaec0676ccbc15d3e with lease ID 0x2b6a9a1af1c44e48: Processing first storage report for DS-551227ba-da3f-4c8e-927c-131f6e89eefe from datanode DatanodeRegistration(127.0.0.1:42947, datanodeUuid=3be5ebb6-56e9-435b-a394-794943739344, infoPort=33541, infoSecurePort=0, ipcPort=35511, storageInfo=lv=-57;cid=testClusterID;nsid=1393981232;c=1732519682243) 2024-11-25T07:28:04,346 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaec0676ccbc15d3e with lease ID 0x2b6a9a1af1c44e48: from storage DS-551227ba-da3f-4c8e-927c-131f6e89eefe node DatanodeRegistration(127.0.0.1:42947, datanodeUuid=3be5ebb6-56e9-435b-a394-794943739344, infoPort=33541, infoSecurePort=0, ipcPort=35511, storageInfo=lv=-57;cid=testClusterID;nsid=1393981232;c=1732519682243), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:28:04,346 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd2cca37bf9c682b with lease ID 0x2b6a9a1af1c44e49: Processing first storage report for DS-b52c81b9-679c-42a5-aa80-d1869f175bb8 from datanode DatanodeRegistration(127.0.0.1:42077, datanodeUuid=7659ee3f-20c3-4e96-bf69-a8836b47a004, infoPort=32801, infoSecurePort=0, ipcPort=44745, storageInfo=lv=-57;cid=testClusterID;nsid=1393981232;c=1732519682243) 2024-11-25T07:28:04,346 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xdd2cca37bf9c682b with lease ID 0x2b6a9a1af1c44e49: from storage DS-b52c81b9-679c-42a5-aa80-d1869f175bb8 node DatanodeRegistration(127.0.0.1:42077, datanodeUuid=7659ee3f-20c3-4e96-bf69-a8836b47a004, infoPort=32801, infoSecurePort=0, ipcPort=44745, storageInfo=lv=-57;cid=testClusterID;nsid=1393981232;c=1732519682243), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:28:04,416 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79 2024-11-25T07:28:04,492 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b/zookeeper_0, clientPort=49945, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T07:28:04,502 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49945 2024-11-25T07:28:04,512 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:28:04,516 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:28:04,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:28:04,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:28:05,209 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6 with version=8 2024-11-25T07:28:05,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/hbase-staging 2024-11-25T07:28:05,303 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-25T07:28:05,556 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:28:05,567 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:28:05,568 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:28:05,572 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:28:05,572 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:28:05,573 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:28:05,706 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T07:28:05,765 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-25T07:28:05,773 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-25T07:28:05,777 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:28:05,803 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19667 (auto-detected) 2024-11-25T07:28:05,804 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-25T07:28:05,823 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38121 2024-11-25T07:28:05,842 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38121 connecting to ZooKeeper ensemble=127.0.0.1:49945 2024-11-25T07:28:05,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:381210x0, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:28:05,880 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38121-0x1014e06540a0000 connected 2024-11-25T07:28:05,906 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:28:05,908 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:28:05,918 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:28:05,922 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6, hbase.cluster.distributed=false 2024-11-25T07:28:05,951 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:28:05,958 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38121 2024-11-25T07:28:05,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38121 2024-11-25T07:28:05,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38121 2024-11-25T07:28:05,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38121 2024-11-25T07:28:05,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38121 2024-11-25T07:28:06,101 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:28:06,103 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:28:06,103 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:28:06,104 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:28:06,104 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:28:06,104 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:28:06,107 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T07:28:06,109 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:28:06,110 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36015 2024-11-25T07:28:06,112 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36015 connecting to ZooKeeper ensemble=127.0.0.1:49945 2024-11-25T07:28:06,113 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:28:06,118 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:28:06,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360150x0, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:28:06,126 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36015-0x1014e06540a0001 connected 2024-11-25T07:28:06,126 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:28:06,130 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T07:28:06,138 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T07:28:06,141 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T07:28:06,146 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:28:06,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36015 2024-11-25T07:28:06,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36015 2024-11-25T07:28:06,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36015 2024-11-25T07:28:06,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36015 2024-11-25T07:28:06,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36015 2024-11-25T07:28:06,167 DEBUG [M:0;5eb3d201e8c9:38121 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5eb3d201e8c9:38121 2024-11-25T07:28:06,168 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5eb3d201e8c9,38121,1732519685358 2024-11-25T07:28:06,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:28:06,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:28:06,176 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5eb3d201e8c9,38121,1732519685358 2024-11-25T07:28:06,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T07:28:06,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:28:06,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-25T07:28:06,198 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T07:28:06,199 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5eb3d201e8c9,38121,1732519685358 from backup master directory 2024-11-25T07:28:06,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5eb3d201e8c9,38121,1732519685358 2024-11-25T07:28:06,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:28:06,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:28:06,203 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T07:28:06,203 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5eb3d201e8c9,38121,1732519685358 2024-11-25T07:28:06,205 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-25T07:28:06,207 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-25T07:28:06,270 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/hbase.id] with ID: 120abb30-37f4-4b8b-81e6-18d04198756b 2024-11-25T07:28:06,271 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/.tmp/hbase.id 2024-11-25T07:28:06,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:28:06,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:28:06,284 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/.tmp/hbase.id]:[hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/hbase.id] 2024-11-25T07:28:06,327 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:28:06,332 INFO 
[master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T07:28:06,351 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-25T07:28:06,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:28:06,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:28:06,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:28:06,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:28:06,394 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:28:06,397 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T07:28:06,405 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:28:06,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:28:06,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:28:06,462 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store 2024-11-25T07:28:06,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:28:06,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:28:06,487 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-25T07:28:06,490 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:28:06,491 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:28:06,491 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:28:06,492 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:28:06,493 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:28:06,493 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T07:28:06,493 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:28:06,495 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519686491Disabling compacts and flushes for region at 1732519686491Disabling writes for close at 1732519686493 (+2 ms)Writing region close event to WAL at 1732519686493Closed at 1732519686493 2024-11-25T07:28:06,496 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/.initializing 2024-11-25T07:28:06,497 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/WALs/5eb3d201e8c9,38121,1732519685358 2024-11-25T07:28:06,518 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C38121%2C1732519685358, suffix=, logDir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/WALs/5eb3d201e8c9,38121,1732519685358, archiveDir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/oldWALs, maxLogs=10 2024-11-25T07:28:06,527 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C38121%2C1732519685358.1732519686523 2024-11-25T07:28:06,546 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/WALs/5eb3d201e8c9,38121,1732519685358/5eb3d201e8c9%2C38121%2C1732519685358.1732519686523 2024-11-25T07:28:06,555 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32801:32801),(127.0.0.1/127.0.0.1:33541:33541)] 2024-11-25T07:28:06,556 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:28:06,556 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:28:06,560 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,561 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T07:28:06,628 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:06,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:28:06,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T07:28:06,635 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:06,637 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:28:06,637 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,640 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T07:28:06,640 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:06,641 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:28:06,641 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,644 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T07:28:06,644 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:06,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:28:06,645 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,648 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,649 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,654 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,655 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,658 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T07:28:06,661 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:28:06,665 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:28:06,666 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742059, jitterRate=-0.056423529982566833}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T07:28:06,672 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732519686573Initializing all the Stores at 1732519686575 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519686576 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519686576Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519686577 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519686577Cleaning up temporary data from old regions at 1732519686655 (+78 ms)Region opened successfully at 1732519686672 (+17 ms) 2024-11-25T07:28:06,673 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T07:28:06,706 DEBUG 
[master/5eb3d201e8c9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a8b2abf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:28:06,738 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T07:28:06,750 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T07:28:06,750 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T07:28:06,753 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T07:28:06,754 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-25T07:28:06,759 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-25T07:28:06,759 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T07:28:06,784 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T07:28:06,792 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T07:28:06,795 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T07:28:06,797 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T07:28:06,799 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T07:28:06,802 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T07:28:06,804 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T07:28:06,808 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T07:28:06,810 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T07:28:06,812 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T07:28:06,813 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T07:28:06,835 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T07:28:06,836 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T07:28:06,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:28:06,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:28:06,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:28:06,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:28:06,844 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5eb3d201e8c9,38121,1732519685358, sessionid=0x1014e06540a0000, setting cluster-up flag (Was=false) 2024-11-25T07:28:06,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:28:06,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:28:06,863 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T07:28:06,865 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,38121,1732519685358 2024-11-25T07:28:06,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:28:06,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:28:06,878 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T07:28:06,880 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,38121,1732519685358 2024-11-25T07:28:06,887 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T07:28:06,955 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(746): ClusterId : 120abb30-37f4-4b8b-81e6-18d04198756b 2024-11-25T07:28:06,958 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T07:28:06,963 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T07:28:06,963 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T07:28:06,967 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T07:28:06,968 DEBUG [RS:0;5eb3d201e8c9:36015 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66f0f837, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:28:06,969 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T07:28:06,978 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T07:28:06,982 DEBUG [RS:0;5eb3d201e8c9:36015 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5eb3d201e8c9:36015 2024-11-25T07:28:06,984 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T07:28:06,986 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T07:28:06,986 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T07:28:06,986 DEBUG [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(832): About to register with Master. 
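The StochasticLoadBalancer "Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000 ..." line above is driven by configuration rather than code. A minimal sketch of setting the same knobs programmatically follows; the property names are the ones I understand StochasticLoadBalancer to read, so treat them as assumptions and verify them against this HBase version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerConfigSketch {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror the "Loaded config" line in the log above.
        conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        return conf;
      }
    }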
2024-11-25T07:28:06,989 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(2659): reportForDuty to master=5eb3d201e8c9,38121,1732519685358 with port=36015, startcode=1732519686061 2024-11-25T07:28:06,990 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5eb3d201e8c9,38121,1732519685358 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T07:28:06,996 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:28:06,996 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:28:06,996 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:28:06,997 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:28:06,997 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5eb3d201e8c9:0, corePoolSize=10, maxPoolSize=10 2024-11-25T07:28:06,997 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:06,997 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:28:06,997 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,002 DEBUG [RS:0;5eb3d201e8c9:36015 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T07:28:07,002 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732519717001 2024-11-25T07:28:07,003 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T07:28:07,004 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:28:07,004 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T07:28:07,004 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T07:28:07,007 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T07:28:07,008 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T07:28:07,008 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T07:28:07,008 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T07:28:07,010 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:07,009 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,010 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T07:28:07,012 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T07:28:07,013 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T07:28:07,014 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T07:28:07,016 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T07:28:07,017 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T07:28:07,020 DEBUG 
[master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519687018,5,FailOnTimeoutGroup] 2024-11-25T07:28:07,021 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519687021,5,FailOnTimeoutGroup] 2024-11-25T07:28:07,022 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,022 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T07:28:07,023 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,023 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:28:07,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:28:07,028 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T07:28:07,028 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'}, regionDir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6 2024-11-25T07:28:07,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:28:07,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:28:07,040 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:28:07,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:28:07,050 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:28:07,050 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:07,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:28:07,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:28:07,054 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:28:07,055 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:07,056 
INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:28:07,056 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:28:07,059 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:28:07,059 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:07,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:28:07,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:28:07,063 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:28:07,063 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:07,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:28:07,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:28:07,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered 
edits file(s) under hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740 2024-11-25T07:28:07,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740 2024-11-25T07:28:07,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:28:07,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:28:07,070 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T07:28:07,073 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:28:07,079 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:28:07,080 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876621, jitterRate=0.11468218266963959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:28:07,082 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34521, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T07:28:07,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732519687041Initializing all the Stores at 1732519687042 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519687042Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519687046 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519687046Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519687046Cleaning up temporary data from old regions at 1732519687069 (+23 
ms)Region opened successfully at 1732519687084 (+15 ms) 2024-11-25T07:28:07,087 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:28:07,087 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:28:07,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:28:07,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:28:07,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:28:07,089 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:28:07,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519687087Disabling compacts and flushes for region at 1732519687087Disabling writes for close at 1732519687088 (+1 ms)Writing region close event to WAL at 1732519687089 (+1 ms)Closed at 1732519687089 2024-11-25T07:28:07,090 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38121 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5eb3d201e8c9,36015,1732519686061 2024-11-25T07:28:07,093 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38121 {}] master.ServerManager(517): Registering regionserver=5eb3d201e8c9,36015,1732519686061 2024-11-25T07:28:07,093 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:28:07,093 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T07:28:07,102 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T07:28:07,109 DEBUG [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6 2024-11-25T07:28:07,109 DEBUG [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34173 2024-11-25T07:28:07,109 DEBUG [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T07:28:07,112 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:28:07,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:28:07,114 DEBUG [RS:0;5eb3d201e8c9:36015 {}] zookeeper.ZKUtil(111): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5eb3d201e8c9,36015,1732519686061 2024-11-25T07:28:07,114 WARN [RS:0;5eb3d201e8c9:36015 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
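The CompactionConfiguration(183) dumps repeated above (minCompactSize 128 MB, files [3, 10), ratio 1.2, off-peak ratio 5.0) echo per-store compaction settings that are normally tuned through configuration. A minimal sketch of the keys I believe correspond to those values; the exact property names are assumptions worth double-checking for this branch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Mirrors the CompactionConfiguration dump in the log above.
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        return conf;
      }
    }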
2024-11-25T07:28:07,114 INFO [RS:0;5eb3d201e8c9:36015 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:28:07,115 DEBUG [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061 2024-11-25T07:28:07,115 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T07:28:07,116 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5eb3d201e8c9,36015,1732519686061] 2024-11-25T07:28:07,142 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T07:28:07,155 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T07:28:07,160 INFO [RS:0;5eb3d201e8c9:36015 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T07:28:07,161 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,161 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T07:28:07,168 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T07:28:07,169 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
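The MemStoreFlusher limits just above (globalMemStoreLimit=880 M with a low mark of 836 M, i.e. 95% of the upper limit) and the PressureAwareCompactionThroughputController bounds (100 MB/s upper, 50 MB/s lower) are derived from heap-fraction and throughput settings. A hedged sketch of the keys I believe drive them; the logged values are consistent with the defaults, and the property names should be verified rather than taken as authoritative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndThroughputConfigSketch {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // The global memstore limit is a fraction of the region server heap;
        // 880 M / 836 M in the log are this fraction (and 95% of it) of the test JVM heap.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds matching "higher bound: 100.00 MB/second, lower bound 50.00 MB/second".
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }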
2024-11-25T07:28:07,169 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,169 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,170 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,170 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,170 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,170 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:28:07,171 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,171 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,171 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,171 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,171 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,172 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:28:07,172 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:28:07,172 DEBUG [RS:0;5eb3d201e8c9:36015 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:28:07,173 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,173 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,173 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,174 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
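The executor services and ScheduledChore instances enabled above (CompactionChecker every 1000 ms, MemstoreFlusherChore, ExecutorStatusChore, nonceCleaner, and so on) are periodic background tasks run by the region server's chore service. A minimal sketch of scheduling one such chore, assuming the public ChoreService/ScheduledChore classes in hbase-common keep their usual constructors; this is illustrative only and not the region server's own wiring.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        // Simple stopper the chore framework can consult for shutdown.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("sketch");
        // A periodic task comparable to the chores enabled in the log above
        // (e.g. CompactionChecker with period=1000 ms).
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
          @Override protected void chore() {
            // periodic work goes here
          }
        };
        service.scheduleChore(chore);
      }
    }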
2024-11-25T07:28:07,174 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,174 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,36015,1732519686061-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:28:07,194 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T07:28:07,196 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,36015,1732519686061-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,196 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,196 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.Replication(171): 5eb3d201e8c9,36015,1732519686061 started 2024-11-25T07:28:07,213 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,214 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(1482): Serving as 5eb3d201e8c9,36015,1732519686061, RpcServer on 5eb3d201e8c9/172.17.0.2:36015, sessionid=0x1014e06540a0001 2024-11-25T07:28:07,214 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T07:28:07,214 DEBUG [RS:0;5eb3d201e8c9:36015 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5eb3d201e8c9,36015,1732519686061 2024-11-25T07:28:07,215 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,36015,1732519686061' 2024-11-25T07:28:07,215 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T07:28:07,216 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T07:28:07,216 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T07:28:07,216 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T07:28:07,216 DEBUG [RS:0;5eb3d201e8c9:36015 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5eb3d201e8c9,36015,1732519686061 2024-11-25T07:28:07,217 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,36015,1732519686061' 2024-11-25T07:28:07,217 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T07:28:07,217 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T07:28:07,218 DEBUG [RS:0;5eb3d201e8c9:36015 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T07:28:07,218 INFO [RS:0;5eb3d201e8c9:36015 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T07:28:07,218 INFO [RS:0;5eb3d201e8c9:36015 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-25T07:28:07,266 WARN [5eb3d201e8c9:38121 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T07:28:07,326 INFO [RS:0;5eb3d201e8c9:36015 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C36015%2C1732519686061, suffix=, logDir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061, archiveDir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs, maxLogs=32 2024-11-25T07:28:07,328 INFO [RS:0;5eb3d201e8c9:36015 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C36015%2C1732519686061.1732519687328 2024-11-25T07:28:07,337 INFO [RS:0;5eb3d201e8c9:36015 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519687328 2024-11-25T07:28:07,341 DEBUG [RS:0;5eb3d201e8c9:36015 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32801:32801),(127.0.0.1/127.0.0.1:33541:33541)] 2024-11-25T07:28:07,518 DEBUG [5eb3d201e8c9:38121 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T07:28:07,530 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5eb3d201e8c9,36015,1732519686061 2024-11-25T07:28:07,537 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,36015,1732519686061, state=OPENING 2024-11-25T07:28:07,542 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T07:28:07,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:28:07,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:28:07,544 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:28:07,544 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:28:07,546 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:28:07,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,36015,1732519686061}] 2024-11-25T07:28:07,723 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T07:28:07,727 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34015, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T07:28:07,737 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T07:28:07,738 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:28:07,741 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C36015%2C1732519686061.meta, suffix=.meta, logDir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061, archiveDir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs, maxLogs=32 2024-11-25T07:28:07,743 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C36015%2C1732519686061.meta.1732519687743.meta 2024-11-25T07:28:07,750 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.meta.1732519687743.meta 2024-11-25T07:28:07,752 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33541:33541),(127.0.0.1/127.0.0.1:32801:32801)] 2024-11-25T07:28:07,755 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:28:07,757 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T07:28:07,759 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T07:28:07,764 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-25T07:28:07,768 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T07:28:07,769 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:28:07,769 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T07:28:07,769 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T07:28:07,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:28:07,774 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:28:07,774 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:07,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:28:07,775 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:28:07,776 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:28:07,776 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:07,777 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:28:07,777 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:28:07,778 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:28:07,778 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:07,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:28:07,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:28:07,780 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:28:07,780 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:07,781 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-25T07:28:07,781 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:28:07,782 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740 2024-11-25T07:28:07,785 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740 2024-11-25T07:28:07,788 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:28:07,788 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:28:07,789 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T07:28:07,791 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:28:07,793 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863504, jitterRate=0.09800292551517487}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:28:07,793 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T07:28:07,795 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732519687770Writing region info on filesystem at 1732519687770Initializing all the Stores at 1732519687772 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519687772Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519687772Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519687772Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519687772Cleaning up temporary data from old regions at 1732519687788 (+16 ms)Running coprocessor post-open hooks at 1732519687793 (+5 ms)Region opened successfully at 1732519687795 (+2 ms) 2024-11-25T07:28:07,802 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732519687714 2024-11-25T07:28:07,815 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T07:28:07,816 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T07:28:07,817 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5eb3d201e8c9,36015,1732519686061 2024-11-25T07:28:07,819 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,36015,1732519686061, state=OPEN 2024-11-25T07:28:07,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:28:07,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:28:07,828 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:28:07,828 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:28:07,828 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,36015,1732519686061 2024-11-25T07:28:07,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T07:28:07,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,36015,1732519686061 in 282 msec 2024-11-25T07:28:07,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T07:28:07,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 736 msec 2024-11-25T07:28:07,844 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:28:07,844 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T07:28:07,865 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:28:07,866 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,36015,1732519686061, seqNum=-1] 2024-11-25T07:28:07,888 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:28:07,891 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57293, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:28:07,913 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 992 msec 2024-11-25T07:28:07,913 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732519687913, completionTime=-1 2024-11-25T07:28:07,917 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T07:28:07,917 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T07:28:07,951 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T07:28:07,951 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732519747951 2024-11-25T07:28:07,951 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732519807951 2024-11-25T07:28:07,951 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 33 msec 2024-11-25T07:28:07,954 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,38121,1732519685358-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,954 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,38121,1732519685358-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,954 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,38121,1732519685358-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,956 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5eb3d201e8c9:38121, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T07:28:07,956 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,957 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T07:28:07,964 DEBUG [master/5eb3d201e8c9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T07:28:07,983 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.780sec 2024-11-25T07:28:07,985 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T07:28:07,986 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T07:28:07,987 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T07:28:07,988 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T07:28:07,988 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T07:28:07,989 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,38121,1732519685358-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:28:07,990 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,38121,1732519685358-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T07:28:07,998 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T07:28:08,000 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T07:28:08,000 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,38121,1732519685358-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T07:28:08,067 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62c7596a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:28:08,069 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-25T07:28:08,069 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-25T07:28:08,073 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5eb3d201e8c9,38121,-1 for getting cluster id 2024-11-25T07:28:08,076 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T07:28:08,083 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '120abb30-37f4-4b8b-81e6-18d04198756b' 2024-11-25T07:28:08,086 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T07:28:08,086 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "120abb30-37f4-4b8b-81e6-18d04198756b" 2024-11-25T07:28:08,089 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29842d21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:28:08,089 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5eb3d201e8c9,38121,-1] 2024-11-25T07:28:08,092 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T07:28:08,115 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:28:08,119 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40504, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T07:28:08,122 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@625dad7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:28:08,123 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:28:08,131 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,36015,1732519686061, seqNum=-1] 2024-11-25T07:28:08,131 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:28:08,136 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54788, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:28:08,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=5eb3d201e8c9,38121,1732519685358 2024-11-25T07:28:08,163 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:28:08,170 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T07:28:08,174 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T07:28:08,179 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5eb3d201e8c9,38121,1732519685358 2024-11-25T07:28:08,181 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2bfd4db8 2024-11-25T07:28:08,182 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T07:28:08,184 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40506, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T07:28:08,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38121 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T07:28:08,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38121 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-25T07:28:08,190 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38121 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:28:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38121 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-25T07:28:08,199 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T07:28:08,201 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38121 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-25T07:28:08,201 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:08,204 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T07:28:08,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38121 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T07:28:08,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741835_1011 (size=389) 2024-11-25T07:28:08,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741835_1011 (size=389) 2024-11-25T07:28:08,252 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cb75a0848d31ad8558bf0db4854011cc, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6 2024-11-25T07:28:08,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741836_1012 (size=72) 2024-11-25T07:28:08,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741836_1012 (size=72) 2024-11-25T07:28:08,263 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:28:08,263 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing cb75a0848d31ad8558bf0db4854011cc, disabling compactions & flushes 2024-11-25T07:28:08,263 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 2024-11-25T07:28:08,263 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 2024-11-25T07:28:08,263 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. after waiting 0 ms 2024-11-25T07:28:08,263 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 2024-11-25T07:28:08,263 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 2024-11-25T07:28:08,263 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for cb75a0848d31ad8558bf0db4854011cc: Waiting for close lock at 1732519688263Disabling compacts and flushes for region at 1732519688263Disabling writes for close at 1732519688263Writing region close event to WAL at 1732519688263Closed at 1732519688263 2024-11-25T07:28:08,266 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T07:28:08,271 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732519688266"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732519688266"}]},"ts":"1732519688266"} 2024-11-25T07:28:08,276 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-25T07:28:08,278 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T07:28:08,281 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732519688278"}]},"ts":"1732519688278"} 2024-11-25T07:28:08,286 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-25T07:28:08,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=cb75a0848d31ad8558bf0db4854011cc, ASSIGN}] 2024-11-25T07:28:08,290 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=cb75a0848d31ad8558bf0db4854011cc, ASSIGN 2024-11-25T07:28:08,291 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=cb75a0848d31ad8558bf0db4854011cc, ASSIGN; state=OFFLINE, location=5eb3d201e8c9,36015,1732519686061; forceNewPlan=false, retain=false 2024-11-25T07:28:08,443 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cb75a0848d31ad8558bf0db4854011cc, regionState=OPENING, regionLocation=5eb3d201e8c9,36015,1732519686061 2024-11-25T07:28:08,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=cb75a0848d31ad8558bf0db4854011cc, ASSIGN because future has completed 2024-11-25T07:28:08,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cb75a0848d31ad8558bf0db4854011cc, server=5eb3d201e8c9,36015,1732519686061}] 2024-11-25T07:28:08,610 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 
2024-11-25T07:28:08,611 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => cb75a0848d31ad8558bf0db4854011cc, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:28:08,612 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:08,612 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:28:08,612 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:08,612 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:08,615 INFO [StoreOpener-cb75a0848d31ad8558bf0db4854011cc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:08,617 INFO [StoreOpener-cb75a0848d31ad8558bf0db4854011cc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cb75a0848d31ad8558bf0db4854011cc columnFamilyName info 2024-11-25T07:28:08,617 DEBUG [StoreOpener-cb75a0848d31ad8558bf0db4854011cc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:28:08,618 INFO [StoreOpener-cb75a0848d31ad8558bf0db4854011cc-1 {}] regionserver.HStore(327): Store=cb75a0848d31ad8558bf0db4854011cc/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:28:08,618 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:08,620 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:08,621 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:08,622 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:08,622 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:08,625 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:08,628 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:28:08,629 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened cb75a0848d31ad8558bf0db4854011cc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757687, jitterRate=-0.036551475524902344}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T07:28:08,629 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:08,630 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for cb75a0848d31ad8558bf0db4854011cc: Running coprocessor pre-open hook at 1732519688612Writing region info on filesystem at 1732519688612Initializing all the Stores at 1732519688614 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519688614Cleaning up temporary data from old regions at 1732519688622 (+8 ms)Running coprocessor post-open hooks at 1732519688629 (+7 ms)Region opened successfully at 1732519688630 (+1 ms) 2024-11-25T07:28:08,632 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc., pid=6, masterSystemTime=1732519688604 2024-11-25T07:28:08,636 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 2024-11-25T07:28:08,636 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 2024-11-25T07:28:08,638 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cb75a0848d31ad8558bf0db4854011cc, regionState=OPEN, openSeqNum=2, regionLocation=5eb3d201e8c9,36015,1732519686061 2024-11-25T07:28:08,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cb75a0848d31ad8558bf0db4854011cc, server=5eb3d201e8c9,36015,1732519686061 because future has completed 2024-11-25T07:28:08,647 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T07:28:08,647 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure cb75a0848d31ad8558bf0db4854011cc, server=5eb3d201e8c9,36015,1732519686061 in 194 msec 2024-11-25T07:28:08,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T07:28:08,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=cb75a0848d31ad8558bf0db4854011cc, ASSIGN in 360 msec 2024-11-25T07:28:08,653 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T07:28:08,653 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732519688653"}]},"ts":"1732519688653"} 2024-11-25T07:28:08,657 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-25T07:28:08,658 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T07:28:08,661 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 466 msec 2024-11-25T07:28:13,272 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-25T07:28:13,328 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T07:28:13,330 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-25T07:28:15,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T07:28:15,763 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T07:28:15,765 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-25T07:28:15,765 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-25T07:28:15,766 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:28:15,766 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T07:28:15,767 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-25T07:28:15,767 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-25T07:28:18,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38121 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T07:28:18,258 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-25T07:28:18,261 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-25T07:28:18,267 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-25T07:28:18,268 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 
2024-11-25T07:28:18,268 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C36015%2C1732519686061.1732519698268 2024-11-25T07:28:18,277 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:18,277 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:18,278 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:18,278 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:18,278 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:18,278 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519687328 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519698268 2024-11-25T07:28:18,282 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32801:32801),(127.0.0.1/127.0.0.1:33541:33541)] 2024-11-25T07:28:18,282 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519687328 is not closed yet, will try archiving it next time 2024-11-25T07:28:18,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741833_1009 (size=451) 2024-11-25T07:28:18,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741833_1009 (size=451) 2024-11-25T07:28:18,286 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519687328 to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs/5eb3d201e8c9%2C36015%2C1732519686061.1732519687328 2024-11-25T07:28:18,291 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc., hostname=5eb3d201e8c9,36015,1732519686061, seqNum=2] 2024-11-25T07:28:30,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36015 {}] regionserver.HRegion(8855): Flush requested on cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:30,323 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cb75a0848d31ad8558bf0db4854011cc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T07:28:30,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/f6f9d5b198ec489cbf500cc04b0cbf3f is 1080, key is row0001/info:/1732519698293/Put/seqid=0 2024-11-25T07:28:30,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741838_1014 (size=12509) 2024-11-25T07:28:30,395 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741838_1014 (size=12509) 2024-11-25T07:28:30,396 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/f6f9d5b198ec489cbf500cc04b0cbf3f 2024-11-25T07:28:30,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/f6f9d5b198ec489cbf500cc04b0cbf3f as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/f6f9d5b198ec489cbf500cc04b0cbf3f 2024-11-25T07:28:30,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/f6f9d5b198ec489cbf500cc04b0cbf3f, entries=7, sequenceid=11, filesize=12.2 K 2024-11-25T07:28:30,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for cb75a0848d31ad8558bf0db4854011cc in 137ms, sequenceid=11, compaction requested=false 2024-11-25T07:28:30,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cb75a0848d31ad8558bf0db4854011cc: 2024-11-25T07:28:34,412 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
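
The flush recorded above writes seven ~1 KB cells (row0001 onward, ~7.36 KB total) to a .tmp HFile, commits it under info/, and finishes in 137 ms. Those cells come from ordinary client puts; the sketch below, under the assumption of the table and family names from the log, shows the shape of such writes plus an explicit Admin flush for comparison. The column qualifier "q" and the 1 KB value are illustrative assumptions (the log shows an empty qualifier), and in the test itself the flush is requested internally by the region server, not by the client.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteAndFlush {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    byte[] family = Bytes.toBytes("info");
    byte[] value = new byte[1024]; // roughly the cell size reported by HFileWriterImpl ("is 1080")
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      for (int i = 1; i <= 7; i++) {
        // Row keys follow the pattern visible in the log: row0001, row0002, ...
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(family, Bytes.toBytes("q"), value);
        table.put(put);
      }
      // Force the memstore to disk; produces the ".tmp -> info/<hfile>" commit sequence shown above.
      admin.flush(tn);
    }
  }
}
```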
2024-11-25T07:28:38,334 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C36015%2C1732519686061.1732519718334 2024-11-25T07:28:38,545 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK], DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK]] 2024-11-25T07:28:38,545 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:38,545 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:38,545 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:38,546 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:38,546 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:38,546 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519698268 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519718334 2024-11-25T07:28:38,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741837_1013 (size=12399) 2024-11-25T07:28:38,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741837_1013 (size=12399) 2024-11-25T07:28:38,553 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32801:32801),(127.0.0.1/127.0.0.1:33541:33541)] 2024-11-25T07:28:38,756 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK], DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK]] 2024-11-25T07:28:40,960 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK], DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK]] 2024-11-25T07:28:43,165 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK], DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK]] 2024-11-25T07:28:45,369 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK], DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK]] 2024-11-25T07:28:45,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36015 {}] 
regionserver.HRegion(8855): Flush requested on cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:28:45,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cb75a0848d31ad8558bf0db4854011cc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T07:28:45,571 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK], DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK]] 2024-11-25T07:28:45,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/7563710bef8545589d30444c25fddcd8 is 1080, key is row0008/info:/1732519712322/Put/seqid=0 2024-11-25T07:28:45,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741840_1016 (size=12509) 2024-11-25T07:28:45,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741840_1016 (size=12509) 2024-11-25T07:28:45,585 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/7563710bef8545589d30444c25fddcd8 2024-11-25T07:28:45,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/7563710bef8545589d30444c25fddcd8 as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/7563710bef8545589d30444c25fddcd8 2024-11-25T07:28:45,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/7563710bef8545589d30444c25fddcd8, entries=7, sequenceid=21, filesize=12.2 K 2024-11-25T07:28:45,805 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK], DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK]] 2024-11-25T07:28:45,805 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for cb75a0848d31ad8558bf0db4854011cc in 436ms, sequenceid=21, compaction requested=false 2024-11-25T07:28:45,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cb75a0848d31ad8558bf0db4854011cc: 2024-11-25T07:28:45,806 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-25T07:28:45,806 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:28:45,807 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/f6f9d5b198ec489cbf500cc04b0cbf3f because midkey is the same as first or last row 2024-11-25T07:28:47,573 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK], DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK]] 2024-11-25T07:28:48,026 INFO [master/5eb3d201e8c9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T07:28:48,026 INFO [master/5eb3d201e8c9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-25T07:28:49,778 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK], DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK]] 2024-11-25T07:28:49,781 WARN [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK], DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK]] 2024-11-25T07:28:49,782 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C36015%2C1732519686061:(num 1732519718334) roll requested 2024-11-25T07:28:49,783 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C36015%2C1732519686061.1732519729782 2024-11-25T07:28:49,994 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 209 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK], DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK]] 2024-11-25T07:28:49,994 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:49,994 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:49,994 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:49,995 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:49,995 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:28:49,995 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519718334 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519729782 2024-11-25T07:28:49,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741839_1015 (size=7739) 2024-11-25T07:28:49,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741839_1015 (size=7739) 2024-11-25T07:28:50,005 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33541:33541),(127.0.0.1/127.0.0.1:32801:32801)] 2024-11-25T07:28:50,005 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519718334 is not closed yet, will try archiving it next time 2024-11-25T07:28:50,005 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519698268 to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs/5eb3d201e8c9%2C36015%2C1732519686061.1732519698268 2024-11-25T07:28:51,984 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:28:53,612 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cb75a0848d31ad8558bf0db4854011cc, had cached 0 bytes from a total of 25018 2024-11-25T07:28:54,188 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:28:56,393 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:28:58,597 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:00,599 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T07:29:00,600 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C36015%2C1732519686061.1732519740600 
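
The roll requested at 07:28:49 is count-driven: eight syncs over the slow-sync threshold against a limit of five ("count=8, threshold=5"), whereas the rolls in the next block are time-driven (a single sync of ~5,006 ms against a 5,000 ms limit). The following is a small standalone paraphrase of those two triggers as they read from these log lines; it is not HBase's actual FSHLog code, and the constants simply restate the thresholds the log reports.

```java
/** Paraphrase of the two WAL roll triggers visible in these log lines; not the real FSHLog code. */
final class SlowSyncRollCheck {
  static final int SLOW_SYNC_COUNT_LIMIT = 5;   // "threshold=5" in the count-based roll request
  static final long ROLL_ON_SYNC_MS = 5_000L;   // "threshold=5000 ms" in the time-based roll request

  private int slowSyncCount = 0;

  /** Returns true when a log roll should be requested after a sync of the given duration. */
  boolean recordSync(long syncMillis, long slowSyncMillis) {
    if (syncMillis > slowSyncMillis) {
      slowSyncCount++;                            // e.g. the repeated "Slow sync cost: 201 ms" entries
    }
    return slowSyncCount >= SLOW_SYNC_COUNT_LIMIT  // trigger 1: too many slow syncs ("count=8, threshold=5")
        || syncMillis >= ROLL_ON_SYNC_MS;          // trigger 2: one very slow sync ("time=5006 ms")
  }

  public static void main(String[] args) {
    SlowSyncRollCheck check = new SlowSyncRollCheck();
    // Simulate the pattern above: eight ~201 ms syncs against an assumed 200 ms slow-sync threshold.
    for (int i = 1; i <= 8; i++) {
      System.out.println("sync " + i + " -> roll requested: " + check.recordSync(201, 200));
    }
  }
}
```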
2024-11-25T07:29:04,412 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T07:29:05,609 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:05,612 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:05,612 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C36015%2C1732519686061:(num 1732519740600) roll requested 2024-11-25T07:29:05,612 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:05,612 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:05,612 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:05,612 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:05,613 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:05,613 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519729782 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519740600 2024-11-25T07:29:05,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741841_1017 (size=4753) 2024-11-25T07:29:05,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741841_1017 (size=4753) 2024-11-25T07:29:05,621 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33541:33541),(127.0.0.1/127.0.0.1:32801:32801)] 2024-11-25T07:29:05,621 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519729782 is not closed yet, will try archiving it next time 2024-11-25T07:29:05,621 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C36015%2C1732519686061.1732519745621 2024-11-25T07:29:10,624 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:10,625 WARN [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1374): Requesting 
log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:10,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36015 {}] regionserver.HRegion(8855): Flush requested on cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:29:10,625 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cb75a0848d31ad8558bf0db4854011cc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T07:29:10,630 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:10,631 WARN [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:12,626 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T07:29:15,627 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:15,627 WARN [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:15,628 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:15,628 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:15,628 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:15,628 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:15,628 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:15,629 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519740600 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519745621 2024-11-25T07:29:15,630 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33541:33541),(127.0.0.1/127.0.0.1:32801:32801)] 2024-11-25T07:29:15,630 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519740600 is not closed yet, will try archiving it next time 2024-11-25T07:29:15,630 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C36015%2C1732519686061:(num 1732519745621) roll requested 2024-11-25T07:29:15,630 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C36015%2C1732519686061.1732519755630 2024-11-25T07:29:15,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741842_1018 (size=1569) 2024-11-25T07:29:15,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741842_1018 (size=1569) 2024-11-25T07:29:15,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/70a8531bff8f421585612bdf18c3a96a is 1080, key is row0015/info:/1732519727371/Put/seqid=0 2024-11-25T07:29:15,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741844_1020 (size=12509) 2024-11-25T07:29:15,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741844_1020 (size=12509) 2024-11-25T07:29:15,644 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/70a8531bff8f421585612bdf18c3a96a 2024-11-25T07:29:15,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/70a8531bff8f421585612bdf18c3a96a as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/70a8531bff8f421585612bdf18c3a96a 2024-11-25T07:29:15,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/70a8531bff8f421585612bdf18c3a96a, entries=7, sequenceid=31, filesize=12.2 K 2024-11-25T07:29:20,640 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:20,640 WARN [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:20,666 INFO [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:20,667 WARN [FSHLog-0-hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6-prefix:5eb3d201e8c9,36015,1732519686061 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42947,DS-6a3c74b1-7032-4e99-9591-6caa5e2d2cbb,DISK], DatanodeInfoWithStorage[127.0.0.1:42077,DS-c50db246-0a69-4f33-af37-aa71eb481602,DISK]] 2024-11-25T07:29:20,667 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for cb75a0848d31ad8558bf0db4854011cc in 10041ms, sequenceid=31, compaction requested=true 2024-11-25T07:29:20,667 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cb75a0848d31ad8558bf0db4854011cc: 2024-11-25T07:29:20,667 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,667 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-25T07:29:20,667 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:29:20,667 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,667 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/f6f9d5b198ec489cbf500cc04b0cbf3f because midkey is the same as first or last row 2024-11-25T07:29:20,668 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,668 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,668 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519745621 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519755630 2024-11-25T07:29:20,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cb75a0848d31ad8558bf0db4854011cc:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T07:29:20,672 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33541:33541),(127.0.0.1/127.0.0.1:32801:32801)] 2024-11-25T07:29:20,672 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519745621 is not closed yet, will try archiving it next time 2024-11-25T07:29:20,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741843_1019 (size=438) 2024-11-25T07:29:20,673 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519718334 to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs/5eb3d201e8c9%2C36015%2C1732519686061.1732519718334 2024-11-25T07:29:20,672 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C36015%2C1732519686061:(num 1732519755630) roll requested 2024-11-25T07:29:20,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741843_1019 (size=438) 2024-11-25T07:29:20,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:29:20,673 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C36015%2C1732519686061.1732519760673 2024-11-25T07:29:20,675 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519729782 to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs/5eb3d201e8c9%2C36015%2C1732519686061.1732519729782 2024-11-25T07:29:20,677 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T07:29:20,677 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519740600 to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs/5eb3d201e8c9%2C36015%2C1732519686061.1732519740600 2024-11-25T07:29:20,680 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T07:29:20,681 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.HStore(1541): cb75a0848d31ad8558bf0db4854011cc/info is initiating minor compaction (all files) 2024-11-25T07:29:20,682 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,682 INFO [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of cb75a0848d31ad8558bf0db4854011cc/info in TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 
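
The selection above adds up cleanly: three flushed HFiles of 12,509 bytes each account for the 37,527 bytes (36.6 K) that the ExploringCompactionPolicy reports, and all three are rewritten into a single file in the minor compaction that follows. Here the compaction is queued automatically by the flusher; purely for reference, a minimal sketch of requesting the same work explicitly through the client API is shown below (table name from the log, everything else assumed).

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
      admin.compact(tn);        // ask for a minor compaction of every region of the table
      // admin.majorCompact(tn); // major variant: rewrites all store files unconditionally
    }
  }
}
```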
2024-11-25T07:29:20,682 INFO [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/f6f9d5b198ec489cbf500cc04b0cbf3f, hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/7563710bef8545589d30444c25fddcd8, hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/70a8531bff8f421585612bdf18c3a96a] into tmpdir=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp, totalSize=36.6 K 2024-11-25T07:29:20,684 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] compactions.Compactor(225): Compacting f6f9d5b198ec489cbf500cc04b0cbf3f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732519698293 2024-11-25T07:29:20,684 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7563710bef8545589d30444c25fddcd8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732519712322 2024-11-25T07:29:20,684 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,684 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,685 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,685 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,685 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519755630 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519760673 2024-11-25T07:29:20,685 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] compactions.Compactor(225): Compacting 70a8531bff8f421585612bdf18c3a96a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732519727371 2024-11-25T07:29:20,687 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33541:33541),(127.0.0.1/127.0.0.1:32801:32801)] 2024-11-25T07:29:20,687 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519745621 is not closed yet, will try archiving it next time 2024-11-25T07:29:20,687 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519755630 is not closed yet, will try archiving it next time 2024-11-25T07:29:20,687 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C36015%2C1732519686061.1732519760687 2024-11-25T07:29:20,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42947 is added to blk_1073741845_1021 (size=93) 2024-11-25T07:29:20,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741845_1021 (size=93) 2024-11-25T07:29:20,703 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,703 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,704 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,704 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,704 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:20,704 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519760673 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519760687 2024-11-25T07:29:20,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741846_1022 (size=1258) 2024-11-25T07:29:20,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741846_1022 (size=1258) 2024-11-25T07:29:20,710 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(879): hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519745621 is not closed yet, will try archiving it next time 2024-11-25T07:29:20,711 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(879): hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519755630 is not closed yet, will try archiving it next time 2024-11-25T07:29:20,722 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33541:33541),(127.0.0.1/127.0.0.1:32801:32801)] 2024-11-25T07:29:20,722 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519745621 is not closed yet, will try archiving it next time 2024-11-25T07:29:20,722 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519755630 is not closed yet, will try archiving it next time 2024-11-25T07:29:20,739 INFO [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cb75a0848d31ad8558bf0db4854011cc#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:29:20,740 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/529a731e36c54571991be29806be8975 is 1080, key is row0001/info:/1732519698293/Put/seqid=0 2024-11-25T07:29:20,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741848_1024 (size=27710) 2024-11-25T07:29:20,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741848_1024 (size=27710) 2024-11-25T07:29:20,786 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/529a731e36c54571991be29806be8975 as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/529a731e36c54571991be29806be8975 2024-11-25T07:29:20,815 INFO [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in cb75a0848d31ad8558bf0db4854011cc/info of cb75a0848d31ad8558bf0db4854011cc into 529a731e36c54571991be29806be8975(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T07:29:20,815 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for cb75a0848d31ad8558bf0db4854011cc: 2024-11-25T07:29:20,818 INFO [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc., storeName=cb75a0848d31ad8558bf0db4854011cc/info, priority=13, startTime=1732519760669; duration=0sec 2024-11-25T07:29:20,818 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-25T07:29:20,818 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:29:20,818 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/529a731e36c54571991be29806be8975 because midkey is the same as first or last row 2024-11-25T07:29:20,819 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-25T07:29:20,819 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:29:20,819 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/529a731e36c54571991be29806be8975 because midkey is the same as first or last row 2024-11-25T07:29:20,819 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-25T07:29:20,819 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:29:20,819 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/529a731e36c54571991be29806be8975 because midkey is the same as first or last row 2024-11-25T07:29:20,819 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:29:20,820 DEBUG [RS:0;5eb3d201e8c9:36015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cb75a0848d31ad8558bf0db4854011cc:info 2024-11-25T07:29:21,073 DEBUG [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(879): hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519755630 is not closed yet, will try archiving it next time 2024-11-25T07:29:21,074 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519745621 to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs/5eb3d201e8c9%2C36015%2C1732519686061.1732519745621 2024-11-25T07:29:21,091 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/WALs/5eb3d201e8c9,36015,1732519686061/5eb3d201e8c9%2C36015%2C1732519686061.1732519755630 to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs/5eb3d201e8c9%2C36015%2C1732519686061.1732519755630 2024-11-25T07:29:32,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36015 {}] regionserver.HRegion(8855): Flush requested on cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:29:32,716 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cb75a0848d31ad8558bf0db4854011cc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T07:29:32,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/28450a3e519f411489b0c756d1eefdb9 is 1080, key is row0022/info:/1732519760689/Put/seqid=0 2024-11-25T07:29:32,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741849_1025 (size=12509) 2024-11-25T07:29:32,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to 
blk_1073741849_1025 (size=12509) 2024-11-25T07:29:32,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/28450a3e519f411489b0c756d1eefdb9 2024-11-25T07:29:32,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/28450a3e519f411489b0c756d1eefdb9 as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/28450a3e519f411489b0c756d1eefdb9 2024-11-25T07:29:32,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/28450a3e519f411489b0c756d1eefdb9, entries=7, sequenceid=42, filesize=12.2 K 2024-11-25T07:29:32,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for cb75a0848d31ad8558bf0db4854011cc in 35ms, sequenceid=42, compaction requested=false 2024-11-25T07:29:32,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cb75a0848d31ad8558bf0db4854011cc: 2024-11-25T07:29:32,751 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-25T07:29:32,751 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:29:32,752 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/529a731e36c54571991be29806be8975 because midkey is the same as first or last row 2024-11-25T07:29:34,412 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T07:29:38,612 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cb75a0848d31ad8558bf0db4854011cc, had cached 0 bytes from a total of 40219 2024-11-25T07:29:40,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T07:29:40,729 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
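
Each flush above ends with the same split check: the combined store size (39.3 K, i.e. the 40,219 bytes cached in the region metrics) is compared against a 16.0 K threshold, the policy decides the region is big enough, but StoreUtils refuses to split because the midkey of the largest file equals its first or last row, so there is no usable split point. The sketch below paraphrases that decision as it reads from these entries; the 16 K figure is simply what the log reports (the policy derives it from configuration), and this is not the IncreasingToUpperBoundRegionSplitPolicy source.

```java
/** Paraphrase of the split decision logged after each flush; not the actual HBase policy code. */
final class SplitCheckSketch {
  public static void main(String[] args) {
    long sumStoreSize = 40_219;        // ~39.3 K, total store size after the last flush above
    long sizeToCheck = 16 * 1024;      // "sizeToCheck=16.0 K" as reported by the policy
    boolean midkeyUsable = false;      // log: "midkey is the same as first or last row"

    boolean shouldSplit = sumStoreSize > sizeToCheck;  // "Should split because region size is big enough"
    boolean canSplit = shouldSplit && midkeyUsable;    // StoreUtils vetoes the split without a usable midkey

    System.out.println("shouldSplit=" + shouldSplit + ", canSplit=" + canSplit);
  }
}
```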
2024-11-25T07:29:40,729 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:29:40,736 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:40,737 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:40,737 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
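
The call stack above shows where the shutdown originates: AbstractTestLogRolling.tearDown closes the shared connection and HBaseTestingUtil.shutdownMiniCluster() takes the mini cluster down, which drives everything that follows (ZooKeeper /hbase/running deletion, region server stop, region close). A minimal sketch of that teardown shape is below; the static TEST_UTIL field name is an assumption following common HBase test convention, while the call chain itself is taken from the stack trace.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTeardownSketch {
  // In the real test this utility is created once during setup and starts the mini cluster.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Mirrors the stack trace: close client connections, stop HBase, then stop HDFS/ZooKeeper.
    TEST_UTIL.shutdownMiniCluster();
  }
}
```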
2024-11-25T07:29:40,737 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T07:29:40,737 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1256453245, stopped=false 2024-11-25T07:29:40,737 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5eb3d201e8c9,38121,1732519685358 2024-11-25T07:29:40,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:29:40,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:29:40,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:40,739 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:29:40,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:40,740 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T07:29:40,740 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:29:40,740 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:29:40,740 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:29:40,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:40,741 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5eb3d201e8c9,36015,1732519686061' ***** 2024-11-25T07:29:40,741 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T07:29:40,741 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T07:29:40,742 INFO [RS:0;5eb3d201e8c9:36015 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T07:29:40,742 INFO [RS:0;5eb3d201e8c9:36015 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T07:29:40,742 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T07:29:40,742 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(3091): Received CLOSE for cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:29:40,743 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(959): stopping server 5eb3d201e8c9,36015,1732519686061 2024-11-25T07:29:40,743 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:29:40,743 INFO [RS:0;5eb3d201e8c9:36015 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5eb3d201e8c9:36015. 
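The ZKWatcher lines above record a NodeDeleted event for /hbase/running on quorum 127.0.0.1:49945, which is how the master and region server learn that cluster shutdown was requested. Below is a hedged sketch of watching that znode with the plain ZooKeeper client; the quorum address and znode path are taken from the log, while the class name, session timeout, and waiting logic are illustrative, and this is not HBase's own ZKWatcher.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class RunningZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        System.out.println("Cluster shutdown signalled: " + event.getPath() + " deleted");
      }
    };
    // 127.0.0.1:49945 is the mini-cluster quorum seen in the ZKWatcher log lines above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:49945", 30_000, watcher);
    zk.exists("/hbase/running", true); // registers the default watcher on this znode
    Thread.sleep(60_000);              // illustrative: keep the session alive to observe the event
    zk.close();
  }
}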
2024-11-25T07:29:40,743 DEBUG [RS:0;5eb3d201e8c9:36015 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:29:40,743 DEBUG [RS:0;5eb3d201e8c9:36015 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:40,743 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cb75a0848d31ad8558bf0db4854011cc, disabling compactions & flushes 2024-11-25T07:29:40,743 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T07:29:40,743 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 2024-11-25T07:29:40,743 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T07:29:40,743 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T07:29:40,743 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 2024-11-25T07:29:40,744 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. after waiting 0 ms 2024-11-25T07:29:40,744 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T07:29:40,744 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 
2024-11-25T07:29:40,744 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing cb75a0848d31ad8558bf0db4854011cc 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-25T07:29:40,744 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T07:29:40,744 DEBUG [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(1325): Online Regions={cb75a0848d31ad8558bf0db4854011cc=TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc., 1588230740=hbase:meta,,1.1588230740} 2024-11-25T07:29:40,744 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:29:40,744 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:29:40,744 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:29:40,744 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:29:40,744 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:29:40,744 DEBUG [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, cb75a0848d31ad8558bf0db4854011cc 2024-11-25T07:29:40,744 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-25T07:29:40,750 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/e825355603474852a6e5dd6f8e7fba4e is 1080, key is row0029/info:/1732519774719/Put/seqid=0 2024-11-25T07:29:40,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741850_1026 (size=8193) 2024-11-25T07:29:40,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741850_1026 (size=8193) 2024-11-25T07:29:40,760 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/e825355603474852a6e5dd6f8e7fba4e 2024-11-25T07:29:40,768 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/.tmp/info/9a3eb08f095e45a4a47f5b3a636242e3 is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc./info:regioninfo/1732519688637/Put/seqid=0 2024-11-25T07:29:40,770 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/.tmp/info/e825355603474852a6e5dd6f8e7fba4e as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/e825355603474852a6e5dd6f8e7fba4e 2024-11-25T07:29:40,782 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/e825355603474852a6e5dd6f8e7fba4e, entries=3, sequenceid=48, filesize=8.0 K 2024-11-25T07:29:40,784 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for cb75a0848d31ad8558bf0db4854011cc in 40ms, sequenceid=48, compaction requested=true 2024-11-25T07:29:40,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741851_1027 (size=7016) 2024-11-25T07:29:40,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741851_1027 (size=7016) 2024-11-25T07:29:40,787 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/.tmp/info/9a3eb08f095e45a4a47f5b3a636242e3 2024-11-25T07:29:40,793 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/f6f9d5b198ec489cbf500cc04b0cbf3f, hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/7563710bef8545589d30444c25fddcd8, hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/70a8531bff8f421585612bdf18c3a96a] to archive 2024-11-25T07:29:40,797 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-25T07:29:40,802 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/f6f9d5b198ec489cbf500cc04b0cbf3f to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/archive/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/f6f9d5b198ec489cbf500cc04b0cbf3f 2024-11-25T07:29:40,807 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/7563710bef8545589d30444c25fddcd8 to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/archive/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/7563710bef8545589d30444c25fddcd8 2024-11-25T07:29:40,809 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/70a8531bff8f421585612bdf18c3a96a to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/archive/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/info/70a8531bff8f421585612bdf18c3a96a 2024-11-25T07:29:40,830 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/.tmp/ns/8b83503ba8ac4ff39b3818d351c64d17 is 43, key is default/ns:d/1732519687896/Put/seqid=0 2024-11-25T07:29:40,830 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5eb3d201e8c9:38121 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-25T07:29:40,839 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [f6f9d5b198ec489cbf500cc04b0cbf3f=12509, 7563710bef8545589d30444c25fddcd8=12509, 70a8531bff8f421585612bdf18c3a96a=12509] 2024-11-25T07:29:40,859 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-25T07:29:40,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741852_1028 (size=5153) 2024-11-25T07:29:40,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741852_1028 (size=5153) 2024-11-25T07:29:40,862 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/.tmp/ns/8b83503ba8ac4ff39b3818d351c64d17 2024-11-25T07:29:40,866 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 2024-11-25T07:29:40,867 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cb75a0848d31ad8558bf0db4854011cc: Waiting for close lock at 1732519780743Running coprocessor pre-close hooks at 1732519780743Disabling compacts and flushes for region at 1732519780743Disabling writes for close at 1732519780744 (+1 ms)Obtaining lock to block concurrent updates at 1732519780744Preparing flush snapshotting stores in cb75a0848d31ad8558bf0db4854011cc at 1732519780744Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732519780744Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. at 1732519780745 (+1 ms)Flushing cb75a0848d31ad8558bf0db4854011cc/info: creating writer at 1732519780745Flushing cb75a0848d31ad8558bf0db4854011cc/info: appending metadata at 1732519780750 (+5 ms)Flushing cb75a0848d31ad8558bf0db4854011cc/info: closing flushed file at 1732519780750Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6252c15b: reopening flushed file at 1732519780769 (+19 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for cb75a0848d31ad8558bf0db4854011cc in 40ms, sequenceid=48, compaction requested=true at 1732519780784 (+15 ms)Writing region close event to WAL at 1732519780849 (+65 ms)Running coprocessor post-close hooks at 1732519780864 (+15 ms)Closed at 1732519780866 (+2 ms) 2024-11-25T07:29:40,867 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732519688186.cb75a0848d31ad8558bf0db4854011cc. 
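The HFileArchiver lines above move each compacted store file from data/default/&lt;table&gt;/&lt;region&gt;/&lt;family&gt;/ to the mirrored location under archive/. A small standalone sketch of that path rewrite, using plain java.nio rather than HBase's archiver (the helper name is hypothetical, and the hdfs://localhost:34173 scheme/authority is dropped for the local illustration):

import java.nio.file.Path;
import java.nio.file.Paths;

public final class ArchivePathSketch {
  // Hypothetical helper: maps a store file under <root>/data/... to <root>/archive/data/...
  static Path toArchivePath(Path rootDir, Path storeFile) {
    Path relative = rootDir.relativize(storeFile);        // data/default/<table>/<region>/<cf>/<file>
    return rootDir.resolve("archive").resolve(relative);  // archive/data/default/...
  }

  public static void main(String[] args) {
    Path root = Paths.get("/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6");
    Path hfile = root.resolve(
        "data/default/TestLogRolling-testSlowSyncLogRolling/cb75a0848d31ad8558bf0db4854011cc"
            + "/info/f6f9d5b198ec489cbf500cc04b0cbf3f");
    // Prints the archive location matching the "Archived from FileableStoreFile" lines above.
    System.out.println(toArchivePath(root, hfile));
  }
}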
2024-11-25T07:29:40,895 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/.tmp/table/a4d07b4009e14b67acf61e1791e0697d is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732519688653/Put/seqid=0 2024-11-25T07:29:40,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741853_1029 (size=5396) 2024-11-25T07:29:40,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741853_1029 (size=5396) 2024-11-25T07:29:40,904 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/.tmp/table/a4d07b4009e14b67acf61e1791e0697d 2024-11-25T07:29:40,913 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/.tmp/info/9a3eb08f095e45a4a47f5b3a636242e3 as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/info/9a3eb08f095e45a4a47f5b3a636242e3 2024-11-25T07:29:40,922 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/info/9a3eb08f095e45a4a47f5b3a636242e3, entries=10, sequenceid=11, filesize=6.9 K 2024-11-25T07:29:40,924 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/.tmp/ns/8b83503ba8ac4ff39b3818d351c64d17 as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/ns/8b83503ba8ac4ff39b3818d351c64d17 2024-11-25T07:29:40,933 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/ns/8b83503ba8ac4ff39b3818d351c64d17, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T07:29:40,935 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/.tmp/table/a4d07b4009e14b67acf61e1791e0697d as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/table/a4d07b4009e14b67acf61e1791e0697d 2024-11-25T07:29:40,945 DEBUG [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T07:29:40,946 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/table/a4d07b4009e14b67acf61e1791e0697d, entries=2, sequenceid=11, filesize=5.3 K 2024-11-25T07:29:40,948 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 204ms, sequenceid=11, compaction requested=false 2024-11-25T07:29:40,958 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T07:29:40,959 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:29:40,960 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:29:40,960 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519780744Running coprocessor pre-close hooks at 1732519780744Disabling compacts and flushes for region at 1732519780744Disabling writes for close at 1732519780744Obtaining lock to block concurrent updates at 1732519780745 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732519780745Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732519780745Flushing stores of hbase:meta,,1.1588230740 at 1732519780746 (+1 ms)Flushing 1588230740/info: creating writer at 1732519780746Flushing 1588230740/info: appending metadata at 1732519780767 (+21 ms)Flushing 1588230740/info: closing flushed file at 1732519780767Flushing 1588230740/ns: creating writer at 1732519780799 (+32 ms)Flushing 1588230740/ns: appending metadata at 1732519780829 (+30 ms)Flushing 1588230740/ns: closing flushed file at 1732519780829Flushing 1588230740/table: creating writer at 1732519780872 (+43 ms)Flushing 1588230740/table: appending metadata at 1732519780894 (+22 ms)Flushing 1588230740/table: closing flushed file at 1732519780894Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4728e24f: reopening flushed file at 1732519780912 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f31050c: reopening flushed file at 1732519780923 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@605d1849: reopening flushed file at 1732519780933 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 204ms, sequenceid=11, compaction requested=false at 1732519780948 (+15 ms)Writing region close event to WAL at 1732519780953 (+5 ms)Running coprocessor post-close hooks at 1732519780959 (+6 ms)Closed at 1732519780959 2024-11-25T07:29:40,960 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T07:29:41,145 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(976): stopping server 5eb3d201e8c9,36015,1732519686061; all regions closed. 
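Each "Region close journal" above is one concatenated string of steps, each ending in an epoch-millisecond timestamp and an optional "(+N ms)" delta. A hedged sketch of pulling such a journal apart; the regex and class are illustrative, and the excerpt is copied from the journal for cb75a0848d31ad8558bf0db4854011cc above.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class CloseJournalSketch {
  // One step: "<description> at <13-digit epoch millis>" optionally followed by " (+N ms)".
  private static final Pattern STEP =
      Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    String journal = "Waiting for close lock at 1732519780743"
        + "Running coprocessor pre-close hooks at 1732519780743"
        + "Disabling compacts and flushes for region at 1732519780743"
        + "Disabling writes for close at 1732519780744 (+1 ms)";
    Matcher m = STEP.matcher(journal);
    long previous = -1;
    while (m.find()) {
      long ts = Long.parseLong(m.group(2));
      long delta = previous < 0 ? 0 : ts - previous; // recomputed delta between steps
      System.out.printf("%-45s %d (+%d ms)%n", m.group(1), ts, delta);
      previous = ts;
    }
  }
}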
2024-11-25T07:29:41,147 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,147 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,147 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,147 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,147 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741834_1010 (size=3066) 2024-11-25T07:29:41,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741834_1010 (size=3066) 2024-11-25T07:29:41,154 DEBUG [RS:0;5eb3d201e8c9:36015 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs 2024-11-25T07:29:41,154 INFO [RS:0;5eb3d201e8c9:36015 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C36015%2C1732519686061.meta:.meta(num 1732519687743) 2024-11-25T07:29:41,154 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,155 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,155 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,155 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,155 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741847_1023 (size=12695) 2024-11-25T07:29:41,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741847_1023 (size=12695) 2024-11-25T07:29:41,162 DEBUG [RS:0;5eb3d201e8c9:36015 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs 2024-11-25T07:29:41,162 INFO [RS:0;5eb3d201e8c9:36015 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C36015%2C1732519686061:(num 1732519760687) 2024-11-25T07:29:41,162 DEBUG [RS:0;5eb3d201e8c9:36015 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:41,162 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:29:41,162 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:29:41,162 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.ChoreService(370): Chore service for: regionserver/5eb3d201e8c9:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-25T07:29:41,163 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:29:41,163 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
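The AbstractFSWAL lines above report the closed WAL files being moved into .../oldWALs before the region server finishes stopping. A hedged sketch that lists that directory with the Hadoop FileSystem API; the NameNode URI and path come from the log, while the class name and the bare Configuration are illustrative.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ListOldWalsSketch {
  public static void main(String[] args) throws Exception {
    // hdfs://localhost:34173 is the NameNode used throughout this test run.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34173"), new Configuration());
    Path oldWals = new Path("/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/oldWALs");
    for (FileStatus status : fs.listStatus(oldWals)) {
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
    fs.close();
  }
}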
2024-11-25T07:29:41,163 INFO [RS:0;5eb3d201e8c9:36015 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36015 2024-11-25T07:29:41,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:29:41,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5eb3d201e8c9,36015,1732519686061 2024-11-25T07:29:41,167 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:29:41,170 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5eb3d201e8c9,36015,1732519686061] 2024-11-25T07:29:41,173 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5eb3d201e8c9,36015,1732519686061 already deleted, retry=false 2024-11-25T07:29:41,173 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5eb3d201e8c9,36015,1732519686061 expired; onlineServers=0 2024-11-25T07:29:41,173 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5eb3d201e8c9,38121,1732519685358' ***** 2024-11-25T07:29:41,173 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T07:29:41,173 INFO [M:0;5eb3d201e8c9:38121 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:29:41,173 INFO [M:0;5eb3d201e8c9:38121 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:29:41,174 DEBUG [M:0;5eb3d201e8c9:38121 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T07:29:41,174 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T07:29:41,174 DEBUG [M:0;5eb3d201e8c9:38121 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T07:29:41,174 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519687018 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519687018,5,FailOnTimeoutGroup] 2024-11-25T07:29:41,174 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519687021 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519687021,5,FailOnTimeoutGroup] 2024-11-25T07:29:41,174 INFO [M:0;5eb3d201e8c9:38121 {}] hbase.ChoreService(370): Chore service for: master/5eb3d201e8c9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T07:29:41,174 INFO [M:0;5eb3d201e8c9:38121 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:29:41,174 DEBUG [M:0;5eb3d201e8c9:38121 {}] master.HMaster(1795): Stopping service threads 2024-11-25T07:29:41,174 INFO [M:0;5eb3d201e8c9:38121 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T07:29:41,174 INFO [M:0;5eb3d201e8c9:38121 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:29:41,175 INFO [M:0;5eb3d201e8c9:38121 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T07:29:41,175 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T07:29:41,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T07:29:41,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:41,176 DEBUG [M:0;5eb3d201e8c9:38121 {}] zookeeper.ZKUtil(347): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T07:29:41,176 WARN [M:0;5eb3d201e8c9:38121 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T07:29:41,177 INFO [M:0;5eb3d201e8c9:38121 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/.lastflushedseqids 2024-11-25T07:29:41,179 INFO [regionserver/5eb3d201e8c9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:29:41,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741854_1030 (size=130) 2024-11-25T07:29:41,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741854_1030 (size=130) 2024-11-25T07:29:41,191 INFO [M:0;5eb3d201e8c9:38121 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T07:29:41,191 INFO [M:0;5eb3d201e8c9:38121 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, 
isAbort=false 2024-11-25T07:29:41,191 DEBUG [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:29:41,191 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:41,191 DEBUG [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:41,192 DEBUG [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:29:41,192 DEBUG [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:41,192 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-25T07:29:41,211 DEBUG [M:0;5eb3d201e8c9:38121 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d2a5c8ca0a374a6ca004e103ec10c978 is 82, key is hbase:meta,,1/info:regioninfo/1732519687816/Put/seqid=0 2024-11-25T07:29:41,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741855_1031 (size=5672) 2024-11-25T07:29:41,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741855_1031 (size=5672) 2024-11-25T07:29:41,217 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d2a5c8ca0a374a6ca004e103ec10c978 2024-11-25T07:29:41,242 DEBUG [M:0;5eb3d201e8c9:38121 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f968f426c00e43db98b78a4acb4abb44 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732519688660/Put/seqid=0 2024-11-25T07:29:41,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741856_1032 (size=6247) 2024-11-25T07:29:41,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741856_1032 (size=6247) 2024-11-25T07:29:41,251 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f968f426c00e43db98b78a4acb4abb44 2024-11-25T07:29:41,259 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f968f426c00e43db98b78a4acb4abb44 2024-11-25T07:29:41,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase 
Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:29:41,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36015-0x1014e06540a0001, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:29:41,271 INFO [RS:0;5eb3d201e8c9:36015 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:29:41,271 INFO [RS:0;5eb3d201e8c9:36015 {}] regionserver.HRegionServer(1031): Exiting; stopping=5eb3d201e8c9,36015,1732519686061; zookeeper connection closed. 2024-11-25T07:29:41,271 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2c277c3e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2c277c3e 2024-11-25T07:29:41,272 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T07:29:41,282 DEBUG [M:0;5eb3d201e8c9:38121 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4c32be79f849432b8fc749c2f7aa7ae6 is 69, key is 5eb3d201e8c9,36015,1732519686061/rs:state/1732519687096/Put/seqid=0 2024-11-25T07:29:41,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741857_1033 (size=5156) 2024-11-25T07:29:41,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741857_1033 (size=5156) 2024-11-25T07:29:41,289 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4c32be79f849432b8fc749c2f7aa7ae6 2024-11-25T07:29:41,312 DEBUG [M:0;5eb3d201e8c9:38121 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9fb12b6b88004e378e90c602ef152273 is 52, key is load_balancer_on/state:d/1732519688167/Put/seqid=0 2024-11-25T07:29:41,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741858_1034 (size=5056) 2024-11-25T07:29:41,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741858_1034 (size=5056) 2024-11-25T07:29:41,319 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9fb12b6b88004e378e90c602ef152273 2024-11-25T07:29:41,327 DEBUG [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d2a5c8ca0a374a6ca004e103ec10c978 as 
hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d2a5c8ca0a374a6ca004e103ec10c978 2024-11-25T07:29:41,334 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d2a5c8ca0a374a6ca004e103ec10c978, entries=8, sequenceid=59, filesize=5.5 K 2024-11-25T07:29:41,336 DEBUG [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f968f426c00e43db98b78a4acb4abb44 as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f968f426c00e43db98b78a4acb4abb44 2024-11-25T07:29:41,342 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f968f426c00e43db98b78a4acb4abb44 2024-11-25T07:29:41,343 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f968f426c00e43db98b78a4acb4abb44, entries=6, sequenceid=59, filesize=6.1 K 2024-11-25T07:29:41,344 DEBUG [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4c32be79f849432b8fc749c2f7aa7ae6 as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4c32be79f849432b8fc749c2f7aa7ae6 2024-11-25T07:29:41,351 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4c32be79f849432b8fc749c2f7aa7ae6, entries=1, sequenceid=59, filesize=5.0 K 2024-11-25T07:29:41,352 DEBUG [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9fb12b6b88004e378e90c602ef152273 as hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9fb12b6b88004e378e90c602ef152273 2024-11-25T07:29:41,360 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9fb12b6b88004e378e90c602ef152273, entries=1, sequenceid=59, filesize=4.9 K 2024-11-25T07:29:41,362 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 169ms, sequenceid=59, compaction requested=false 2024-11-25T07:29:41,374 INFO [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T07:29:41,374 DEBUG [M:0;5eb3d201e8c9:38121 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519781191Disabling compacts and flushes for region at 1732519781191Disabling writes for close at 1732519781192 (+1 ms)Obtaining lock to block concurrent updates at 1732519781192Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732519781192Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732519781192Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732519781193 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732519781193Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732519781210 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732519781210Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732519781224 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732519781241 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732519781241Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732519781259 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732519781281 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732519781281Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732519781296 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732519781312 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732519781312Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bcd2027: reopening flushed file at 1732519781326 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d715555: reopening flushed file at 1732519781335 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bff34c4: reopening flushed file at 1732519781343 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48603837: reopening flushed file at 1732519781351 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 169ms, sequenceid=59, compaction requested=false at 1732519781362 (+11 ms)Writing region close event to WAL at 1732519781374 (+12 ms)Closed at 1732519781374 2024-11-25T07:29:41,375 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,376 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,376 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,376 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,376 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:41,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741830_1006 (size=27973) 2024-11-25T07:29:41,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42077 is added to blk_1073741830_1006 (size=27973) 2024-11-25T07:29:41,380 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T07:29:41,380 INFO [M:0;5eb3d201e8c9:38121 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T07:29:41,381 INFO [M:0;5eb3d201e8c9:38121 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38121 2024-11-25T07:29:41,381 INFO [M:0;5eb3d201e8c9:38121 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:29:41,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:29:41,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38121-0x1014e06540a0000, quorum=127.0.0.1:49945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:29:41,484 INFO [M:0;5eb3d201e8c9:38121 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:29:41,498 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ca1952e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:41,502 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75639b0e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:29:41,502 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:29:41,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:29:41,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/hadoop.log.dir/,STOPPED} 2024-11-25T07:29:41,511 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:29:41,511 WARN [BP-1306032755-172.17.0.2-1732519682243 heartbeating to localhost/127.0.0.1:34173 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:29:41,511 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:29:41,511 WARN [BP-1306032755-172.17.0.2-1732519682243 heartbeating to localhost/127.0.0.1:34173 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1306032755-172.17.0.2-1732519682243 (Datanode Uuid 7659ee3f-20c3-4e96-bf69-a8836b47a004) service to localhost/127.0.0.1:34173 2024-11-25T07:29:41,512 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b/data/data3/current/BP-1306032755-172.17.0.2-1732519682243 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:41,513 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b/data/data4/current/BP-1306032755-172.17.0.2-1732519682243 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:41,513 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:29:41,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ca8488f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:41,522 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@dc1ca4f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:29:41,522 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:29:41,522 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:29:41,523 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/hadoop.log.dir/,STOPPED} 2024-11-25T07:29:41,529 WARN [BP-1306032755-172.17.0.2-1732519682243 heartbeating to localhost/127.0.0.1:34173 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:29:41,529 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:29:41,529 WARN [BP-1306032755-172.17.0.2-1732519682243 heartbeating to localhost/127.0.0.1:34173 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1306032755-172.17.0.2-1732519682243 (Datanode Uuid 3be5ebb6-56e9-435b-a394-794943739344) service to localhost/127.0.0.1:34173 2024-11-25T07:29:41,529 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:29:41,530 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b/data/data1/current/BP-1306032755-172.17.0.2-1732519682243 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:41,530 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/cluster_380e9090-6149-93f9-b7ff-bac561ebb92b/data/data2/current/BP-1306032755-172.17.0.2-1732519682243 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:41,537 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:29:41,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:29:41,558 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:29:41,558 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:29:41,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:29:41,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/hadoop.log.dir/,STOPPED} 2024-11-25T07:29:41,574 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T07:29:41,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T07:29:41,621 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=76 (was 12) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34173 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/5eb3d201e8c9:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@503634f0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34173 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34173 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34173 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34173 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/5eb3d201e8c9:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34173 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/5eb3d201e8c9:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34173 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:34173 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=141 (was 193), ProcessCount=11 (was 11), AvailableMemoryMB=8182 (was 8509) 2024-11-25T07:29:41,628 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=77, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=141, ProcessCount=11, AvailableMemoryMB=8181 2024-11-25T07:29:41,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T07:29:41,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/hadoop.log.dir so I do NOT create it in target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59 2024-11-25T07:29:41,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7d598503-c04f-ac43-02e9-588a74f5df79/hadoop.tmp.dir so I do NOT create it in target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59 2024-11-25T07:29:41,629 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab, deleteOnExit=true 2024-11-25T07:29:41,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T07:29:41,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/test.cache.data in system properties and HBase conf 2024-11-25T07:29:41,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T07:29:41,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/hadoop.log.dir in system properties and HBase conf 2024-11-25T07:29:41,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T07:29:41,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T07:29:41,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T07:29:41,630 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-25T07:29:41,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:29:41,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:29:41,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T07:29:41,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:29:41,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T07:29:41,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T07:29:41,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:29:41,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:29:41,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T07:29:41,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/nfs.dump.dir in system properties and HBase conf 2024-11-25T07:29:41,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/java.io.tmpdir in system properties and HBase conf 2024-11-25T07:29:41,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:29:41,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T07:29:41,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T07:29:41,646 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:29:41,765 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:29:41,786 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:29:41,793 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:29:41,793 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:29:41,793 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:29:41,794 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:29:41,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46d26a79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:29:41,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59505eb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:29:41,941 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1edca743{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/java.io.tmpdir/jetty-localhost-41171-hadoop-hdfs-3_4_1-tests_jar-_-any-1837245407435337307/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:29:41,942 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c443180{HTTP/1.1, (http/1.1)}{localhost:41171} 2024-11-25T07:29:41,942 INFO [Time-limited test {}] server.Server(415): Started @101789ms 2024-11-25T07:29:41,957 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:29:42,043 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:29:42,047 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:29:42,048 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:29:42,048 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:29:42,048 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:29:42,049 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69a0f3c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:29:42,050 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@469dec96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:29:42,178 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a15ed6a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/java.io.tmpdir/jetty-localhost-40459-hadoop-hdfs-3_4_1-tests_jar-_-any-17461749736818402852/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:42,179 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@b5aedfa{HTTP/1.1, (http/1.1)}{localhost:40459} 2024-11-25T07:29:42,179 INFO [Time-limited test {}] server.Server(415): Started @102026ms 2024-11-25T07:29:42,181 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:29:42,220 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:29:42,224 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:29:42,225 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:29:42,225 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:29:42,225 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:29:42,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@194f043a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:29:42,226 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@274298f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:29:42,312 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab/data/data1/current/BP-15351427-172.17.0.2-1732519781670/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:42,313 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab/data/data2/current/BP-15351427-172.17.0.2-1732519781670/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:42,337 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T07:29:42,340 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xacd716acb27318a with lease ID 0x22aa251bfb5cb192: Processing first storage report for DS-9ba9baeb-6519-457b-be3d-e4fa957b2615 from datanode DatanodeRegistration(127.0.0.1:40043, datanodeUuid=450e84a4-8df1-4970-bdec-4d0269d69b20, infoPort=45523, infoSecurePort=0, ipcPort=37095, storageInfo=lv=-57;cid=testClusterID;nsid=1205143481;c=1732519781670) 2024-11-25T07:29:42,340 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xacd716acb27318a with lease ID 0x22aa251bfb5cb192: from storage DS-9ba9baeb-6519-457b-be3d-e4fa957b2615 node DatanodeRegistration(127.0.0.1:40043, datanodeUuid=450e84a4-8df1-4970-bdec-4d0269d69b20, infoPort=45523, infoSecurePort=0, ipcPort=37095, storageInfo=lv=-57;cid=testClusterID;nsid=1205143481;c=1732519781670), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:42,340 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xacd716acb27318a with lease ID 0x22aa251bfb5cb192: Processing first storage report for DS-76124a4b-134b-4970-a2b4-4e8bd530646c from datanode DatanodeRegistration(127.0.0.1:40043, datanodeUuid=450e84a4-8df1-4970-bdec-4d0269d69b20, infoPort=45523, infoSecurePort=0, ipcPort=37095, storageInfo=lv=-57;cid=testClusterID;nsid=1205143481;c=1732519781670) 2024-11-25T07:29:42,340 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xacd716acb27318a with lease ID 0x22aa251bfb5cb192: from storage DS-76124a4b-134b-4970-a2b4-4e8bd530646c node DatanodeRegistration(127.0.0.1:40043, datanodeUuid=450e84a4-8df1-4970-bdec-4d0269d69b20, infoPort=45523, infoSecurePort=0, ipcPort=37095, storageInfo=lv=-57;cid=testClusterID;nsid=1205143481;c=1732519781670), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:42,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18492d7d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/java.io.tmpdir/jetty-localhost-44563-hadoop-hdfs-3_4_1-tests_jar-_-any-6338498176814736350/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:42,359 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30ebe7e3{HTTP/1.1, (http/1.1)}{localhost:44563} 2024-11-25T07:29:42,359 INFO [Time-limited test {}] server.Server(415): Started @102206ms 2024-11-25T07:29:42,361 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T07:29:42,476 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab/data/data3/current/BP-15351427-172.17.0.2-1732519781670/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:42,476 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab/data/data4/current/BP-15351427-172.17.0.2-1732519781670/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:42,504 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T07:29:42,511 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeecf1d1072ce9d73 with lease ID 0x22aa251bfb5cb193: Processing first storage report for DS-2a041da0-23ed-4752-814e-cc0a69f53ed2 from datanode DatanodeRegistration(127.0.0.1:43651, datanodeUuid=a126f5eb-ecad-4e96-9c61-e37de071c205, infoPort=33637, infoSecurePort=0, ipcPort=40651, storageInfo=lv=-57;cid=testClusterID;nsid=1205143481;c=1732519781670) 2024-11-25T07:29:42,512 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeecf1d1072ce9d73 with lease ID 0x22aa251bfb5cb193: from storage DS-2a041da0-23ed-4752-814e-cc0a69f53ed2 node DatanodeRegistration(127.0.0.1:43651, datanodeUuid=a126f5eb-ecad-4e96-9c61-e37de071c205, infoPort=33637, infoSecurePort=0, ipcPort=40651, storageInfo=lv=-57;cid=testClusterID;nsid=1205143481;c=1732519781670), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T07:29:42,512 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeecf1d1072ce9d73 with lease ID 0x22aa251bfb5cb193: Processing first storage report for DS-ea856e04-5ade-478e-bbee-f31957df77e1 from datanode DatanodeRegistration(127.0.0.1:43651, datanodeUuid=a126f5eb-ecad-4e96-9c61-e37de071c205, infoPort=33637, infoSecurePort=0, ipcPort=40651, storageInfo=lv=-57;cid=testClusterID;nsid=1205143481;c=1732519781670) 2024-11-25T07:29:42,512 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeecf1d1072ce9d73 with lease ID 0x22aa251bfb5cb193: from storage DS-ea856e04-5ade-478e-bbee-f31957df77e1 node DatanodeRegistration(127.0.0.1:43651, datanodeUuid=a126f5eb-ecad-4e96-9c61-e37de071c205, infoPort=33637, infoSecurePort=0, ipcPort=40651, storageInfo=lv=-57;cid=testClusterID;nsid=1205143481;c=1732519781670), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:42,591 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59 2024-11-25T07:29:42,593 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab/zookeeper_0, clientPort=64974, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T07:29:42,594 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64974 2024-11-25T07:29:42,595 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:42,597 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:42,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:29:42,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:29:42,610 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448 with version=8 2024-11-25T07:29:42,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/hbase-staging 2024-11-25T07:29:42,612 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:29:42,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:42,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:42,612 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:29:42,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:42,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:29:42,612 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T07:29:42,613 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:29:42,613 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46543 2024-11-25T07:29:42,615 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46543 connecting to ZooKeeper ensemble=127.0.0.1:64974 2024-11-25T07:29:42,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:465430x0, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:29:42,622 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46543-0x1014e07d3510000 connected 2024-11-25T07:29:42,641 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:42,643 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:42,645 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:29:42,646 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448, hbase.cluster.distributed=false 2024-11-25T07:29:42,647 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:29:42,648 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46543 2024-11-25T07:29:42,648 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46543 2024-11-25T07:29:42,648 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46543 2024-11-25T07:29:42,649 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46543 2024-11-25T07:29:42,649 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46543 2024-11-25T07:29:42,666 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:29:42,666 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:42,666 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:42,666 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:29:42,666 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:42,666 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:29:42,667 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T07:29:42,667 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:29:42,667 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41805 2024-11-25T07:29:42,669 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41805 connecting to ZooKeeper ensemble=127.0.0.1:64974 2024-11-25T07:29:42,670 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:42,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:42,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:418050x0, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:29:42,678 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:418050x0, quorum=127.0.0.1:64974, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:29:42,678 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41805-0x1014e07d3510001 connected 2024-11-25T07:29:42,678 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T07:29:42,679 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T07:29:42,680 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T07:29:42,681 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:29:42,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41805 2024-11-25T07:29:42,686 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41805 2024-11-25T07:29:42,686 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41805 2024-11-25T07:29:42,687 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41805 2024-11-25T07:29:42,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41805 2024-11-25T07:29:42,703 
DEBUG [M:0;5eb3d201e8c9:46543 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5eb3d201e8c9:46543 2024-11-25T07:29:42,703 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5eb3d201e8c9,46543,1732519782612 2024-11-25T07:29:42,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:29:42,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:29:42,705 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5eb3d201e8c9,46543,1732519782612 2024-11-25T07:29:42,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:42,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T07:29:42,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:42,708 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T07:29:42,708 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5eb3d201e8c9,46543,1732519782612 from backup master directory 2024-11-25T07:29:42,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5eb3d201e8c9,46543,1732519782612 2024-11-25T07:29:42,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:29:42,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:29:42,711 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-25T07:29:42,711 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5eb3d201e8c9,46543,1732519782612 2024-11-25T07:29:42,717 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/hbase.id] with ID: 841da762-4496-41b5-b825-a4d3cc41ae8d 2024-11-25T07:29:42,717 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/.tmp/hbase.id 2024-11-25T07:29:42,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:29:42,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:29:42,727 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/.tmp/hbase.id]:[hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/hbase.id] 2024-11-25T07:29:42,746 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:42,746 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T07:29:42,748 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-25T07:29:42,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:42,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:42,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:29:42,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:29:42,774 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:29:42,776 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T07:29:42,776 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:29:42,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:29:42,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:29:42,788 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store 2024-11-25T07:29:42,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:29:42,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:29:43,200 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:29:43,201 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:29:43,201 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:43,201 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:43,201 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:29:43,201 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:43,201 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T07:29:43,201 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519783201Disabling compacts and flushes for region at 1732519783201Disabling writes for close at 1732519783201Writing region close event to WAL at 1732519783201Closed at 1732519783201 2024-11-25T07:29:43,202 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/.initializing 2024-11-25T07:29:43,203 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/WALs/5eb3d201e8c9,46543,1732519782612 2024-11-25T07:29:43,206 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C46543%2C1732519782612, suffix=, logDir=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/WALs/5eb3d201e8c9,46543,1732519782612, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/oldWALs, maxLogs=10 2024-11-25T07:29:43,206 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C46543%2C1732519782612.1732519783206 2024-11-25T07:29:43,212 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/WALs/5eb3d201e8c9,46543,1732519782612/5eb3d201e8c9%2C46543%2C1732519782612.1732519783206 2024-11-25T07:29:43,213 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33637:33637),(127.0.0.1/127.0.0.1:45523:45523)] 2024-11-25T07:29:43,213 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:29:43,214 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:29:43,214 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,214 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,215 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,217 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T07:29:43,217 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,217 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:43,218 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T07:29:43,219 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:29:43,220 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,222 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T07:29:43,222 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,222 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:29:43,222 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,224 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T07:29:43,224 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,224 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:29:43,224 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,225 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,226 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,228 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,228 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,228 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T07:29:43,230 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:43,232 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:29:43,233 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830447, jitterRate=0.05596797168254852}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T07:29:43,234 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732519783214Initializing all the Stores at 1732519783215 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519783215Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519783215Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519783215Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519783215Cleaning up temporary data from old regions at 1732519783228 (+13 ms)Region opened successfully at 1732519783234 (+6 ms) 2024-11-25T07:29:43,234 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T07:29:43,238 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5261b152, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:29:43,239 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T07:29:43,239 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T07:29:43,239 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T07:29:43,240 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T07:29:43,240 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T07:29:43,241 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T07:29:43,241 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T07:29:43,243 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T07:29:43,244 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T07:29:43,246 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T07:29:43,246 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T07:29:43,247 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T07:29:43,248 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T07:29:43,248 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T07:29:43,249 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T07:29:43,251 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T07:29:43,252 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T07:29:43,253 DEBUG 
[master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T07:29:43,256 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T07:29:43,257 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T07:29:43,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:29:43,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:29:43,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:43,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:43,259 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5eb3d201e8c9,46543,1732519782612, sessionid=0x1014e07d3510000, setting cluster-up flag (Was=false) 2024-11-25T07:29:43,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:43,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:43,269 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T07:29:43,270 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,46543,1732519782612 2024-11-25T07:29:43,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:43,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:43,279 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T07:29:43,280 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,46543,1732519782612 2024-11-25T07:29:43,281 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T07:29:43,283 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T07:29:43,284 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T07:29:43,284 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T07:29:43,284 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5eb3d201e8c9,46543,1732519782612 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T07:29:43,286 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:29:43,286 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:29:43,286 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:29:43,286 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:29:43,286 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5eb3d201e8c9:0, corePoolSize=10, maxPoolSize=10 2024-11-25T07:29:43,286 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,286 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:29:43,286 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T07:29:43,287 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732519813287 2024-11-25T07:29:43,288 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T07:29:43,288 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T07:29:43,288 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T07:29:43,288 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T07:29:43,288 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T07:29:43,288 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T07:29:43,288 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,288 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:29:43,288 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T07:29:43,288 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T07:29:43,289 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T07:29:43,289 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T07:29:43,289 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T07:29:43,289 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T07:29:43,289 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519783289,5,FailOnTimeoutGroup] 2024-11-25T07:29:43,289 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,289 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519783289,5,FailOnTimeoutGroup] 2024-11-25T07:29:43,289 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-25T07:29:43,290 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T07:29:43,290 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,290 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T07:29:43,290 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-25T07:29:43,292 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(746): ClusterId : 841da762-4496-41b5-b825-a4d3cc41ae8d 2024-11-25T07:29:43,292 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T07:29:43,294 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T07:29:43,295 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T07:29:43,297 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T07:29:43,297 DEBUG [RS:0;5eb3d201e8c9:41805 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cc8daf6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:29:43,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:29:43,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:29:43,299 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T07:29:43,299 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448 2024-11-25T07:29:43,307 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:29:43,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:29:43,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:29:43,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:29:43,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:29:43,311 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:43,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:29:43,313 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:29:43,313 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:43,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:29:43,315 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:29:43,315 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,316 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:43,316 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:29:43,316 DEBUG [RS:0;5eb3d201e8c9:41805 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5eb3d201e8c9:41805 2024-11-25T07:29:43,316 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T07:29:43,317 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T07:29:43,317 DEBUG [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T07:29:43,318 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(2659): reportForDuty to master=5eb3d201e8c9,46543,1732519782612 with port=41805, startcode=1732519782666 2024-11-25T07:29:43,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:29:43,318 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,318 DEBUG [RS:0;5eb3d201e8c9:41805 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T07:29:43,318 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:43,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:29:43,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/1588230740 2024-11-25T07:29:43,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/1588230740 2024-11-25T07:29:43,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:29:43,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:29:43,323 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42921, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T07:29:43,323 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T07:29:43,324 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46543 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5eb3d201e8c9,41805,1732519782666 2024-11-25T07:29:43,324 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46543 {}] master.ServerManager(517): Registering regionserver=5eb3d201e8c9,41805,1732519782666 2024-11-25T07:29:43,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:29:43,327 DEBUG [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448 2024-11-25T07:29:43,327 DEBUG [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40849 2024-11-25T07:29:43,327 DEBUG [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T07:29:43,328 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:29:43,329 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871808, jitterRate=0.10856148600578308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:29:43,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:29:43,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732519783308Initializing all the Stores at 1732519783309 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519783309Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519783310 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519783310Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 
1732519783310Cleaning up temporary data from old regions at 1732519783322 (+12 ms)Region opened successfully at 1732519783330 (+8 ms) 2024-11-25T07:29:43,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:29:43,330 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:29:43,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:29:43,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:29:43,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:29:43,331 DEBUG [RS:0;5eb3d201e8c9:41805 {}] zookeeper.ZKUtil(111): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5eb3d201e8c9,41805,1732519782666 2024-11-25T07:29:43,331 WARN [RS:0;5eb3d201e8c9:41805 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T07:29:43,331 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:29:43,331 INFO [RS:0;5eb3d201e8c9:41805 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:29:43,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519783330Disabling compacts and flushes for region at 1732519783330Disabling writes for close at 1732519783330Writing region close event to WAL at 1732519783331 (+1 ms)Closed at 1732519783331 2024-11-25T07:29:43,331 DEBUG [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/WALs/5eb3d201e8c9,41805,1732519782666 2024-11-25T07:29:43,331 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5eb3d201e8c9,41805,1732519782666] 2024-11-25T07:29:43,333 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:29:43,333 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T07:29:43,333 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T07:29:43,335 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:29:43,336 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T07:29:43,337 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T07:29:43,340 INFO 
[RS:0;5eb3d201e8c9:41805 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T07:29:43,343 INFO [RS:0;5eb3d201e8c9:41805 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T07:29:43,343 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,343 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T07:29:43,344 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T07:29:43,344 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,344 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:43,345 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SNAPSHOT_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:29:43,346 DEBUG [RS:0;5eb3d201e8c9:41805 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:29:43,346 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,346 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,346 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,346 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,346 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,346 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41805,1732519782666-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:29:43,371 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T07:29:43,371 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41805,1732519782666-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,371 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,372 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.Replication(171): 5eb3d201e8c9,41805,1732519782666 started 2024-11-25T07:29:43,388 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T07:29:43,388 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(1482): Serving as 5eb3d201e8c9,41805,1732519782666, RpcServer on 5eb3d201e8c9/172.17.0.2:41805, sessionid=0x1014e07d3510001 2024-11-25T07:29:43,388 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T07:29:43,388 DEBUG [RS:0;5eb3d201e8c9:41805 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5eb3d201e8c9,41805,1732519782666 2024-11-25T07:29:43,388 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,41805,1732519782666' 2024-11-25T07:29:43,388 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T07:29:43,389 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T07:29:43,390 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T07:29:43,390 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T07:29:43,390 DEBUG [RS:0;5eb3d201e8c9:41805 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5eb3d201e8c9,41805,1732519782666 2024-11-25T07:29:43,390 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,41805,1732519782666' 2024-11-25T07:29:43,390 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T07:29:43,390 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T07:29:43,391 DEBUG [RS:0;5eb3d201e8c9:41805 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T07:29:43,391 INFO [RS:0;5eb3d201e8c9:41805 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T07:29:43,391 INFO [RS:0;5eb3d201e8c9:41805 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T07:29:43,487 WARN [5eb3d201e8c9:46543 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-25T07:29:43,495 INFO [RS:0;5eb3d201e8c9:41805 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C41805%2C1732519782666, suffix=, logDir=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/WALs/5eb3d201e8c9,41805,1732519782666, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/oldWALs, maxLogs=32 2024-11-25T07:29:43,498 INFO [RS:0;5eb3d201e8c9:41805 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C41805%2C1732519782666.1732519783497 2024-11-25T07:29:43,506 INFO [RS:0;5eb3d201e8c9:41805 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/WALs/5eb3d201e8c9,41805,1732519782666/5eb3d201e8c9%2C41805%2C1732519782666.1732519783497 2024-11-25T07:29:43,508 DEBUG [RS:0;5eb3d201e8c9:41805 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45523:45523),(127.0.0.1/127.0.0.1:33637:33637)] 2024-11-25T07:29:43,737 DEBUG [5eb3d201e8c9:46543 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T07:29:43,738 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5eb3d201e8c9,41805,1732519782666 2024-11-25T07:29:43,740 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,41805,1732519782666, state=OPENING 2024-11-25T07:29:43,741 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T07:29:43,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:43,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:43,743 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:29:43,743 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:29:43,743 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:29:43,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,41805,1732519782666}] 2024-11-25T07:29:43,897 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T07:29:43,899 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35039, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T07:29:43,904 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T07:29:43,904 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:29:43,906 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C41805%2C1732519782666.meta, suffix=.meta, logDir=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/WALs/5eb3d201e8c9,41805,1732519782666, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/oldWALs, maxLogs=32 2024-11-25T07:29:43,908 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C41805%2C1732519782666.meta.1732519783908.meta 2024-11-25T07:29:43,915 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/WALs/5eb3d201e8c9,41805,1732519782666/5eb3d201e8c9%2C41805%2C1732519782666.meta.1732519783908.meta 2024-11-25T07:29:43,916 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45523:45523),(127.0.0.1/127.0.0.1:33637:33637)] 2024-11-25T07:29:43,917 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:29:43,918 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T07:29:43,918 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T07:29:43,918 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-25T07:29:43,918 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T07:29:43,918 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:29:43,918 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T07:29:43,918 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T07:29:43,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:29:43,921 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:29:43,921 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:43,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:29:43,923 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:29:43,923 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:43,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:29:43,924 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:29:43,924 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,925 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:43,925 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:29:43,926 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:29:43,926 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:43,926 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-25T07:29:43,926 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:29:43,927 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/1588230740 2024-11-25T07:29:43,929 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/1588230740 2024-11-25T07:29:43,930 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:29:43,930 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:29:43,931 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T07:29:43,932 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:29:43,933 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797464, jitterRate=0.014028728008270264}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:29:43,933 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T07:29:43,934 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732519783918Writing region info on filesystem at 1732519783918Initializing all the Stores at 1732519783920 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519783920Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519783920Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519783920Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519783920Cleaning up temporary data from old regions at 1732519783930 (+10 ms)Running coprocessor post-open hooks at 1732519783933 (+3 ms)Region opened successfully at 1732519783934 (+1 ms) 2024-11-25T07:29:43,935 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732519783897 2024-11-25T07:29:43,938 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T07:29:43,938 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T07:29:43,939 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5eb3d201e8c9,41805,1732519782666 2024-11-25T07:29:43,941 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,41805,1732519782666, state=OPEN 2024-11-25T07:29:43,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:29:43,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:29:43,946 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,41805,1732519782666 2024-11-25T07:29:43,947 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:29:43,947 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:29:43,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T07:29:43,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,41805,1732519782666 in 204 msec 2024-11-25T07:29:43,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T07:29:43,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 617 msec 2024-11-25T07:29:43,954 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:29:43,954 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T07:29:43,955 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:29:43,956 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,41805,1732519782666, seqNum=-1] 2024-11-25T07:29:43,956 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:29:43,957 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33719, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:29:43,964 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 679 msec 2024-11-25T07:29:43,964 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732519783964, completionTime=-1 2024-11-25T07:29:43,964 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T07:29:43,964 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T07:29:43,966 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T07:29:43,966 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732519843966 2024-11-25T07:29:43,966 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732519903966 2024-11-25T07:29:43,966 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-25T07:29:43,966 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,46543,1732519782612-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,966 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,46543,1732519782612-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,966 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,46543,1732519782612-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,967 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5eb3d201e8c9:46543, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T07:29:43,967 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,967 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:43,969 DEBUG [master/5eb3d201e8c9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T07:29:43,972 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.261sec 2024-11-25T07:29:43,972 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T07:29:43,972 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T07:29:43,972 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T07:29:43,972 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T07:29:43,972 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T07:29:43,972 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,46543,1732519782612-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:29:43,972 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,46543,1732519782612-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T07:29:43,975 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T07:29:43,975 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T07:29:43,975 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,46543,1732519782612-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T07:29:43,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@210f4ee9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:29:43,992 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5eb3d201e8c9,46543,-1 for getting cluster id 2024-11-25T07:29:43,992 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T07:29:43,997 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '841da762-4496-41b5-b825-a4d3cc41ae8d' 2024-11-25T07:29:43,998 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T07:29:43,998 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "841da762-4496-41b5-b825-a4d3cc41ae8d" 2024-11-25T07:29:43,999 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60420209, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:29:43,999 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5eb3d201e8c9,46543,-1] 2024-11-25T07:29:43,999 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T07:29:44,000 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:44,002 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56882, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T07:29:44,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bf88cd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:29:44,004 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:29:44,005 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,41805,1732519782666, seqNum=-1] 2024-11-25T07:29:44,006 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:29:44,007 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51642, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:29:44,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5eb3d201e8c9,46543,1732519782612 2024-11-25T07:29:44,010 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:44,014 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T07:29:44,014 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T07:29:44,015 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T07:29:44,015 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:29:44,015 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:44,015 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:44,015 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T07:29:44,015 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T07:29:44,015 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2023939679, stopped=false 2024-11-25T07:29:44,016 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5eb3d201e8c9,46543,1732519782612 2024-11-25T07:29:44,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:29:44,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:29:44,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:44,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:44,017 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:29:44,018 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T07:29:44,018 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:29:44,019 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:29:44,019 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:29:44,019 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:44,019 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5eb3d201e8c9,41805,1732519782666' ***** 2024-11-25T07:29:44,019 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T07:29:44,020 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T07:29:44,020 INFO [RS:0;5eb3d201e8c9:41805 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T07:29:44,020 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T07:29:44,020 INFO [RS:0;5eb3d201e8c9:41805 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T07:29:44,020 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(959): stopping server 5eb3d201e8c9,41805,1732519782666 2024-11-25T07:29:44,020 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:29:44,020 INFO [RS:0;5eb3d201e8c9:41805 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5eb3d201e8c9:41805. 2024-11-25T07:29:44,020 DEBUG [RS:0;5eb3d201e8c9:41805 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:29:44,020 DEBUG [RS:0;5eb3d201e8c9:41805 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:44,020 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-25T07:29:44,020 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T07:29:44,020 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T07:29:44,021 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T07:29:44,021 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-25T07:29:44,021 DEBUG [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-25T07:29:44,021 DEBUG [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T07:29:44,021 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:29:44,021 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:29:44,021 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:29:44,021 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:29:44,021 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:29:44,021 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-25T07:29:44,040 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/1588230740/.tmp/ns/d17040a33eda4b84bdd8e68629ea4bc8 is 43, key is default/ns:d/1732519783958/Put/seqid=0 2024-11-25T07:29:44,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741835_1011 (size=5153) 2024-11-25T07:29:44,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741835_1011 (size=5153) 2024-11-25T07:29:44,047 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/1588230740/.tmp/ns/d17040a33eda4b84bdd8e68629ea4bc8 2024-11-25T07:29:44,054 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/1588230740/.tmp/ns/d17040a33eda4b84bdd8e68629ea4bc8 as hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/1588230740/ns/d17040a33eda4b84bdd8e68629ea4bc8 2024-11-25T07:29:44,060 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/1588230740/ns/d17040a33eda4b84bdd8e68629ea4bc8, entries=2, sequenceid=6, filesize=5.0 K 2024-11-25T07:29:44,062 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false 2024-11-25T07:29:44,062 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T07:29:44,067 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-25T07:29:44,068 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:29:44,068 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:29:44,068 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519784021Running coprocessor pre-close hooks at 1732519784021Disabling compacts and flushes for region at 1732519784021Disabling writes for close at 1732519784021Obtaining lock to block concurrent updates at 1732519784021Preparing flush snapshotting stores in 1588230740 at 1732519784021Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732519784022 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732519784023 (+1 ms)Flushing 1588230740/ns: creating writer at 1732519784023Flushing 1588230740/ns: appending metadata at 1732519784039 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732519784039Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fa57c0a: reopening flushed file at 1732519784053 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false at 1732519784062 (+9 ms)Writing region close event to WAL at 1732519784063 (+1 ms)Running coprocessor post-close hooks at 1732519784068 (+5 ms)Closed at 1732519784068 2024-11-25T07:29:44,068 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T07:29:44,221 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(976): stopping server 5eb3d201e8c9,41805,1732519782666; all regions closed. 
2024-11-25T07:29:44,222 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,222 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,222 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,222 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,223 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741834_1010 (size=1152) 2024-11-25T07:29:44,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741834_1010 (size=1152) 2024-11-25T07:29:44,229 DEBUG [RS:0;5eb3d201e8c9:41805 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/oldWALs 2024-11-25T07:29:44,229 INFO [RS:0;5eb3d201e8c9:41805 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C41805%2C1732519782666.meta:.meta(num 1732519783908) 2024-11-25T07:29:44,229 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,229 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,229 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,230 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,230 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741833_1009 (size=93) 2024-11-25T07:29:44,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741833_1009 (size=93) 2024-11-25T07:29:44,235 DEBUG [RS:0;5eb3d201e8c9:41805 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/oldWALs 2024-11-25T07:29:44,235 INFO [RS:0;5eb3d201e8c9:41805 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C41805%2C1732519782666:(num 1732519783497) 2024-11-25T07:29:44,235 DEBUG [RS:0;5eb3d201e8c9:41805 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:44,235 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:29:44,236 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:29:44,236 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.ChoreService(370): Chore service for: regionserver/5eb3d201e8c9:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T07:29:44,236 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:29:44,236 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T07:29:44,236 INFO [RS:0;5eb3d201e8c9:41805 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41805 2024-11-25T07:29:44,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5eb3d201e8c9,41805,1732519782666 2024-11-25T07:29:44,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:29:44,238 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:29:44,240 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5eb3d201e8c9,41805,1732519782666] 2024-11-25T07:29:44,243 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5eb3d201e8c9,41805,1732519782666 already deleted, retry=false 2024-11-25T07:29:44,243 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5eb3d201e8c9,41805,1732519782666 expired; onlineServers=0 2024-11-25T07:29:44,243 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5eb3d201e8c9,46543,1732519782612' ***** 2024-11-25T07:29:44,243 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T07:29:44,243 INFO [M:0;5eb3d201e8c9:46543 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:29:44,243 INFO [M:0;5eb3d201e8c9:46543 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:29:44,243 DEBUG [M:0;5eb3d201e8c9:46543 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T07:29:44,243 DEBUG [M:0;5eb3d201e8c9:46543 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T07:29:44,243 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T07:29:44,243 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519783289 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519783289,5,FailOnTimeoutGroup] 2024-11-25T07:29:44,244 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519783289 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519783289,5,FailOnTimeoutGroup] 2024-11-25T07:29:44,244 INFO [M:0;5eb3d201e8c9:46543 {}] hbase.ChoreService(370): Chore service for: master/5eb3d201e8c9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T07:29:44,244 INFO [M:0;5eb3d201e8c9:46543 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:29:44,244 DEBUG [M:0;5eb3d201e8c9:46543 {}] master.HMaster(1795): Stopping service threads 2024-11-25T07:29:44,244 INFO [M:0;5eb3d201e8c9:46543 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T07:29:44,245 INFO [M:0;5eb3d201e8c9:46543 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:29:44,245 INFO [M:0;5eb3d201e8c9:46543 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T07:29:44,245 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T07:29:44,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T07:29:44,245 DEBUG [M:0;5eb3d201e8c9:46543 {}] zookeeper.ZKUtil(347): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T07:29:44,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:44,245 WARN [M:0;5eb3d201e8c9:46543 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T07:29:44,246 INFO [M:0;5eb3d201e8c9:46543 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/.lastflushedseqids 2024-11-25T07:29:44,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741836_1012 (size=108) 2024-11-25T07:29:44,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741836_1012 (size=108) 2024-11-25T07:29:44,254 INFO [M:0;5eb3d201e8c9:46543 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T07:29:44,254 INFO [M:0;5eb3d201e8c9:46543 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T07:29:44,255 DEBUG [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:29:44,255 INFO [M:0;5eb3d201e8c9:46543 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:44,255 DEBUG [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:44,255 DEBUG [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:29:44,255 DEBUG [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:44,255 INFO [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-25T07:29:44,282 DEBUG [M:0;5eb3d201e8c9:46543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f85a861935c84d3a8e56cd723cf3bae8 is 82, key is hbase:meta,,1/info:regioninfo/1732519783939/Put/seqid=0 2024-11-25T07:29:44,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741837_1013 (size=5672) 2024-11-25T07:29:44,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741837_1013 (size=5672) 2024-11-25T07:29:44,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:29:44,340 INFO [RS:0;5eb3d201e8c9:41805 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:29:44,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41805-0x1014e07d3510001, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:29:44,340 INFO [RS:0;5eb3d201e8c9:41805 {}] regionserver.HRegionServer(1031): Exiting; stopping=5eb3d201e8c9,41805,1732519782666; zookeeper connection closed. 
2024-11-25T07:29:44,341 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@9b6063e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@9b6063e 2024-11-25T07:29:44,341 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T07:29:44,690 INFO [M:0;5eb3d201e8c9:46543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f85a861935c84d3a8e56cd723cf3bae8 2024-11-25T07:29:44,725 DEBUG [M:0;5eb3d201e8c9:46543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0afee6ea9b684c79ada5c77d34c93a51 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732519783963/Put/seqid=0 2024-11-25T07:29:44,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741838_1014 (size=5275) 2024-11-25T07:29:44,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741838_1014 (size=5275) 2024-11-25T07:29:44,742 INFO [M:0;5eb3d201e8c9:46543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0afee6ea9b684c79ada5c77d34c93a51 2024-11-25T07:29:44,777 DEBUG [M:0;5eb3d201e8c9:46543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e4ebbaba4e0d417fa5c8f4245c3fab44 is 69, key is 5eb3d201e8c9,41805,1732519782666/rs:state/1732519783325/Put/seqid=0 2024-11-25T07:29:44,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741839_1015 (size=5156) 2024-11-25T07:29:44,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741839_1015 (size=5156) 2024-11-25T07:29:44,786 INFO [M:0;5eb3d201e8c9:46543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e4ebbaba4e0d417fa5c8f4245c3fab44 2024-11-25T07:29:44,818 DEBUG [M:0;5eb3d201e8c9:46543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/72b7bf83621c4cddbb8a5895ae99b963 is 52, key is load_balancer_on/state:d/1732519784013/Put/seqid=0 2024-11-25T07:29:44,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741840_1016 (size=5056) 2024-11-25T07:29:44,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43651 is added to blk_1073741840_1016 (size=5056) 2024-11-25T07:29:44,826 INFO [M:0;5eb3d201e8c9:46543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/72b7bf83621c4cddbb8a5895ae99b963 2024-11-25T07:29:44,834 DEBUG [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f85a861935c84d3a8e56cd723cf3bae8 as hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f85a861935c84d3a8e56cd723cf3bae8 2024-11-25T07:29:44,843 INFO [M:0;5eb3d201e8c9:46543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f85a861935c84d3a8e56cd723cf3bae8, entries=8, sequenceid=29, filesize=5.5 K 2024-11-25T07:29:44,844 DEBUG [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0afee6ea9b684c79ada5c77d34c93a51 as hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0afee6ea9b684c79ada5c77d34c93a51 2024-11-25T07:29:44,851 INFO [M:0;5eb3d201e8c9:46543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0afee6ea9b684c79ada5c77d34c93a51, entries=3, sequenceid=29, filesize=5.2 K 2024-11-25T07:29:44,853 DEBUG [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e4ebbaba4e0d417fa5c8f4245c3fab44 as hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e4ebbaba4e0d417fa5c8f4245c3fab44 2024-11-25T07:29:44,859 INFO [M:0;5eb3d201e8c9:46543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e4ebbaba4e0d417fa5c8f4245c3fab44, entries=1, sequenceid=29, filesize=5.0 K 2024-11-25T07:29:44,861 DEBUG [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/72b7bf83621c4cddbb8a5895ae99b963 as hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/72b7bf83621c4cddbb8a5895ae99b963 2024-11-25T07:29:44,870 INFO [M:0;5eb3d201e8c9:46543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40849/user/jenkins/test-data/474c1614-10f3-cac6-e282-694513c50448/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/72b7bf83621c4cddbb8a5895ae99b963, entries=1, sequenceid=29, filesize=4.9 K 2024-11-25T07:29:44,872 INFO [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 616ms, sequenceid=29, compaction requested=false 2024-11-25T07:29:44,877 INFO [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:44,877 DEBUG [M:0;5eb3d201e8c9:46543 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519784255Disabling compacts and flushes for region at 1732519784255Disabling writes for close at 1732519784255Obtaining lock to block concurrent updates at 1732519784255Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732519784255Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732519784256 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732519784257 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732519784257Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732519784281 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732519784281Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732519784699 (+418 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732519784724 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732519784724Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732519784756 (+32 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732519784776 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732519784776Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732519784794 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732519784818 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732519784818Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6589dfd6: reopening flushed file at 1732519784833 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1020e9cc: reopening flushed file at 1732519784843 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55aaef52: reopening flushed file at 1732519784851 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2630664f: reopening flushed file at 1732519784860 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 616ms, sequenceid=29, compaction requested=false at 1732519784872 (+12 ms)Writing region close event to WAL at 1732519784877 (+5 ms)Closed at 1732519784877 2024-11-25T07:29:44,877 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,878 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,878 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,878 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-25T07:29:44,878 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:44,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40043 is added to blk_1073741830_1006 (size=10311) 2024-11-25T07:29:44,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43651 is added to blk_1073741830_1006 (size=10311) 2024-11-25T07:29:45,283 INFO [M:0;5eb3d201e8c9:46543 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T07:29:45,283 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T07:29:45,283 INFO [M:0;5eb3d201e8c9:46543 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46543 2024-11-25T07:29:45,284 INFO [M:0;5eb3d201e8c9:46543 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:29:45,347 INFO [regionserver/5eb3d201e8c9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:29:45,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:29:45,385 INFO [M:0;5eb3d201e8c9:46543 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:29:45,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46543-0x1014e07d3510000, quorum=127.0.0.1:64974, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:29:45,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18492d7d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:45,388 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30ebe7e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:29:45,389 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:29:45,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@274298f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:29:45,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@194f043a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/hadoop.log.dir/,STOPPED} 2024-11-25T07:29:45,390 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:29:45,390 WARN [BP-15351427-172.17.0.2-1732519781670 heartbeating to localhost/127.0.0.1:40849 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:29:45,390 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:29:45,390 WARN [BP-15351427-172.17.0.2-1732519781670 heartbeating to localhost/127.0.0.1:40849 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-15351427-172.17.0.2-1732519781670 (Datanode Uuid a126f5eb-ecad-4e96-9c61-e37de071c205) service to localhost/127.0.0.1:40849 2024-11-25T07:29:45,391 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab/data/data3/current/BP-15351427-172.17.0.2-1732519781670 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:45,392 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab/data/data4/current/BP-15351427-172.17.0.2-1732519781670 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:45,392 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:29:45,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a15ed6a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:45,394 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@b5aedfa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:29:45,394 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:29:45,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@469dec96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:29:45,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69a0f3c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/hadoop.log.dir/,STOPPED} 2024-11-25T07:29:45,396 WARN [BP-15351427-172.17.0.2-1732519781670 heartbeating to localhost/127.0.0.1:40849 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:29:45,396 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:29:45,396 WARN [BP-15351427-172.17.0.2-1732519781670 heartbeating to localhost/127.0.0.1:40849 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-15351427-172.17.0.2-1732519781670 (Datanode Uuid 450e84a4-8df1-4970-bdec-4d0269d69b20) service to localhost/127.0.0.1:40849 2024-11-25T07:29:45,396 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:29:45,397 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab/data/data1/current/BP-15351427-172.17.0.2-1732519781670 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:45,397 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/cluster_15ecff01-1ff8-5875-b518-d4941bb33cab/data/data2/current/BP-15351427-172.17.0.2-1732519781670 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:45,397 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:29:45,403 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1edca743{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:29:45,403 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c443180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:29:45,403 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:29:45,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59505eb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:29:45,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46d26a79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/hadoop.log.dir/,STOPPED} 2024-11-25T07:29:45,410 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T07:29:45,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T07:29:45,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T07:29:45,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/hadoop.log.dir so I do NOT create it in target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff 2024-11-25T07:29:45,426 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f7773ce-19da-48f7-9afa-f3e83df96e59/hadoop.tmp.dir so I do NOT create it in target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff 2024-11-25T07:29:45,426 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a, deleteOnExit=true 2024-11-25T07:29:45,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T07:29:45,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/test.cache.data in system properties and HBase conf 2024-11-25T07:29:45,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T07:29:45,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir in system properties and HBase conf 2024-11-25T07:29:45,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T07:29:45,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T07:29:45,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T07:29:45,427 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-25T07:29:45,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/nfs.dump.dir in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/java.io.tmpdir in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:29:45,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T07:29:45,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T07:29:45,443 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:29:45,524 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:29:45,532 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:29:45,538 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:29:45,538 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:29:45,538 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:29:45,539 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:29:45,540 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@142d24a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:29:45,540 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42b52d44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:29:45,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c00ef51{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/java.io.tmpdir/jetty-localhost-33785-hadoop-hdfs-3_4_1-tests_jar-_-any-16354936439836841921/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:29:45,663 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73b9709e{HTTP/1.1, (http/1.1)}{localhost:33785} 2024-11-25T07:29:45,663 INFO [Time-limited test {}] server.Server(415): Started @105511ms 2024-11-25T07:29:45,681 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:29:45,753 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:29:45,756 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:29:45,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:29:45,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:29:45,757 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:29:45,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65dec1b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:29:45,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a107105{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:29:45,761 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:29:45,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T07:29:45,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-25T07:29:45,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-25T07:29:45,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:29:45,874 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de86657{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/java.io.tmpdir/jetty-localhost-40611-hadoop-hdfs-3_4_1-tests_jar-_-any-17582587031106443220/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:45,874 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:29:45,874 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6787773a{HTTP/1.1, (http/1.1)}{localhost:40611} 2024-11-25T07:29:45,875 INFO [Time-limited test {}] server.Server(415): Started @105722ms 2024-11-25T07:29:45,891 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T07:29:45,924 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T07:29:45,927 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:29:45,941 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:29:45,942 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:29:45,943 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:29:45,955 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:29:45,959 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:29:45,960 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:29:45,960 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:29:45,961 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:29:45,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51561b8f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:29:45,962 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@372d60ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:29:46,000 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data1/current/BP-673609853-172.17.0.2-1732519785461/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:46,000 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data2/current/BP-673609853-172.17.0.2-1732519785461/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:46,027 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T07:29:46,030 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b3398ec3357c312 with lease ID 0x4f55d6bdd8ce110d: Processing first storage report for DS-23d58dea-54b0-41d9-a032-7e85b064f6c2 from datanode DatanodeRegistration(127.0.0.1:32821, datanodeUuid=fd7732db-5526-486e-931c-c37b0c35cc54, infoPort=41903, infoSecurePort=0, ipcPort=34923, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461) 2024-11-25T07:29:46,030 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b3398ec3357c312 with lease ID 0x4f55d6bdd8ce110d: from storage DS-23d58dea-54b0-41d9-a032-7e85b064f6c2 node DatanodeRegistration(127.0.0.1:32821, datanodeUuid=fd7732db-5526-486e-931c-c37b0c35cc54, infoPort=41903, infoSecurePort=0, ipcPort=34923, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:46,030 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b3398ec3357c312 with lease ID 0x4f55d6bdd8ce110d: Processing first storage report for DS-bca8f1e1-2498-4968-b3a5-92d7da25b8f0 from datanode DatanodeRegistration(127.0.0.1:32821, datanodeUuid=fd7732db-5526-486e-931c-c37b0c35cc54, infoPort=41903, infoSecurePort=0, ipcPort=34923, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461) 2024-11-25T07:29:46,030 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b3398ec3357c312 with lease ID 0x4f55d6bdd8ce110d: from storage DS-bca8f1e1-2498-4968-b3a5-92d7da25b8f0 node DatanodeRegistration(127.0.0.1:32821, datanodeUuid=fd7732db-5526-486e-931c-c37b0c35cc54, infoPort=41903, infoSecurePort=0, ipcPort=34923, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:46,079 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e64a045{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/java.io.tmpdir/jetty-localhost-45525-hadoop-hdfs-3_4_1-tests_jar-_-any-13514660557298702281/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:46,079 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1495e1af{HTTP/1.1, (http/1.1)}{localhost:45525} 2024-11-25T07:29:46,079 INFO [Time-limited test {}] server.Server(415): Started @105926ms 2024-11-25T07:29:46,081 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T07:29:46,206 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data3/current/BP-673609853-172.17.0.2-1732519785461/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:46,206 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data4/current/BP-673609853-172.17.0.2-1732519785461/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:46,224 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T07:29:46,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x301d3af5fb286ab9 with lease ID 0x4f55d6bdd8ce110e: Processing first storage report for DS-2e563855-25ac-4d93-b585-1e5288732181 from datanode DatanodeRegistration(127.0.0.1:35967, datanodeUuid=0067738f-ec80-43c7-978e-8f5a2b9b9e66, infoPort=35341, infoSecurePort=0, ipcPort=39489, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461) 2024-11-25T07:29:46,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x301d3af5fb286ab9 with lease ID 0x4f55d6bdd8ce110e: from storage DS-2e563855-25ac-4d93-b585-1e5288732181 node DatanodeRegistration(127.0.0.1:35967, datanodeUuid=0067738f-ec80-43c7-978e-8f5a2b9b9e66, infoPort=35341, infoSecurePort=0, ipcPort=39489, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T07:29:46,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x301d3af5fb286ab9 with lease ID 0x4f55d6bdd8ce110e: Processing first storage report for DS-c2430039-5d2a-46d7-a7d1-8ea086e9ea37 from datanode DatanodeRegistration(127.0.0.1:35967, datanodeUuid=0067738f-ec80-43c7-978e-8f5a2b9b9e66, infoPort=35341, infoSecurePort=0, ipcPort=39489, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461) 2024-11-25T07:29:46,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x301d3af5fb286ab9 with lease ID 0x4f55d6bdd8ce110e: from storage DS-c2430039-5d2a-46d7-a7d1-8ea086e9ea37 node DatanodeRegistration(127.0.0.1:35967, datanodeUuid=0067738f-ec80-43c7-978e-8f5a2b9b9e66, infoPort=35341, infoSecurePort=0, ipcPort=39489, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:46,319 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff 2024-11-25T07:29:46,322 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/zookeeper_0, clientPort=56970, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T07:29:46,323 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56970 2024-11-25T07:29:46,323 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:46,325 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:46,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:29:46,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35967 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:29:46,336 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a with version=8 2024-11-25T07:29:46,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/hbase-staging 2024-11-25T07:29:46,338 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:29:46,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:46,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:46,338 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:29:46,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:46,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:29:46,338 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T07:29:46,338 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:29:46,339 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41597 2024-11-25T07:29:46,340 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41597 connecting to ZooKeeper ensemble=127.0.0.1:56970 2024-11-25T07:29:46,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:415970x0, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:29:46,348 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41597-0x1014e07e1e10000 connected 2024-11-25T07:29:46,365 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:46,367 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:46,369 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:29:46,370 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a, hbase.cluster.distributed=false 2024-11-25T07:29:46,371 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:29:46,372 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41597 2024-11-25T07:29:46,372 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41597 2024-11-25T07:29:46,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41597 2024-11-25T07:29:46,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41597 2024-11-25T07:29:46,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41597 2024-11-25T07:29:46,393 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:29:46,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:46,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:46,394 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:29:46,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:46,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:29:46,394 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T07:29:46,394 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:29:46,395 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44859 2024-11-25T07:29:46,396 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44859 connecting to ZooKeeper ensemble=127.0.0.1:56970 2024-11-25T07:29:46,397 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:46,398 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:46,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:448590x0, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:29:46,403 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44859-0x1014e07e1e10001 connected 2024-11-25T07:29:46,403 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:29:46,404 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T07:29:46,407 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T07:29:46,407 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T07:29:46,408 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:29:46,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44859 2024-11-25T07:29:46,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44859 2024-11-25T07:29:46,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44859 2024-11-25T07:29:46,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44859 2024-11-25T07:29:46,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44859 
2024-11-25T07:29:46,424 DEBUG [M:0;5eb3d201e8c9:41597 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5eb3d201e8c9:41597 2024-11-25T07:29:46,424 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5eb3d201e8c9,41597,1732519786338 2024-11-25T07:29:46,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:29:46,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:29:46,427 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5eb3d201e8c9,41597,1732519786338 2024-11-25T07:29:46,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T07:29:46,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:46,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:46,430 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T07:29:46,430 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5eb3d201e8c9,41597,1732519786338 from backup master directory 2024-11-25T07:29:46,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5eb3d201e8c9,41597,1732519786338 2024-11-25T07:29:46,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:29:46,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:29:46,432 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-25T07:29:46,432 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5eb3d201e8c9,41597,1732519786338 2024-11-25T07:29:46,438 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/hbase.id] with ID: 3f9c7dea-9009-4d4c-a343-7dfcbad27125 2024-11-25T07:29:46,438 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/.tmp/hbase.id 2024-11-25T07:29:46,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:29:46,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35967 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:29:46,446 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/.tmp/hbase.id]:[hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/hbase.id] 2024-11-25T07:29:46,460 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:46,460 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T07:29:46,462 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-25T07:29:46,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:46,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:46,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35967 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:29:46,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:29:46,475 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:29:46,476 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T07:29:46,476 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:29:46,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35967 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:29:46,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:29:46,890 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store 2024-11-25T07:29:46,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35967 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:29:46,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:29:46,898 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:29:46,899 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:29:46,899 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:46,899 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:46,899 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:29:46,899 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:29:46,899 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T07:29:46,899 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519786899Disabling compacts and flushes for region at 1732519786899Disabling writes for close at 1732519786899Writing region close event to WAL at 1732519786899Closed at 1732519786899 2024-11-25T07:29:46,900 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/.initializing 2024-11-25T07:29:46,900 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338 2024-11-25T07:29:46,903 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C41597%2C1732519786338, suffix=, logDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338, archiveDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/oldWALs, maxLogs=10 2024-11-25T07:29:46,903 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 2024-11-25T07:29:46,908 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 2024-11-25T07:29:46,909 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41903:41903),(127.0.0.1/127.0.0.1:35341:35341)] 2024-11-25T07:29:46,910 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:29:46,910 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:29:46,910 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,910 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,913 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T07:29:46,913 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:46,914 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:46,914 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,915 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T07:29:46,915 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:46,916 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:29:46,916 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T07:29:46,917 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:46,918 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:29:46,918 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,919 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T07:29:46,919 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:46,919 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:29:46,919 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,920 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,921 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,922 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,922 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,923 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T07:29:46,924 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:29:46,926 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:29:46,927 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=883314, jitterRate=0.12319198250770569}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T07:29:46,928 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732519786910Initializing all the Stores at 1732519786911 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519786911Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519786911Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519786912 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519786912Cleaning up temporary data from old regions at 1732519786922 (+10 ms)Region opened successfully at 1732519786928 (+6 ms) 2024-11-25T07:29:46,928 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T07:29:46,932 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@528d37ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:29:46,933 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T07:29:46,933 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T07:29:46,933 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T07:29:46,933 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T07:29:46,934 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T07:29:46,934 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T07:29:46,934 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T07:29:46,936 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T07:29:46,937 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T07:29:46,938 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T07:29:46,939 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T07:29:46,939 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T07:29:46,946 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T07:29:46,946 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T07:29:46,947 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T07:29:46,948 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T07:29:46,949 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T07:29:46,950 DEBUG 
[master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T07:29:46,952 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T07:29:46,953 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T07:29:46,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:29:46,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:29:46,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:46,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:46,957 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5eb3d201e8c9,41597,1732519786338, sessionid=0x1014e07e1e10000, setting cluster-up flag (Was=false) 2024-11-25T07:29:46,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:46,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:46,965 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T07:29:46,966 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,41597,1732519786338 2024-11-25T07:29:46,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:46,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:46,974 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T07:29:46,975 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,41597,1732519786338 2024-11-25T07:29:46,976 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T07:29:46,978 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T07:29:46,979 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T07:29:46,979 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T07:29:46,979 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5eb3d201e8c9,41597,1732519786338 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T07:29:46,981 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:29:46,981 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:29:46,981 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:29:46,981 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:29:46,981 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5eb3d201e8c9:0, corePoolSize=10, maxPoolSize=10 2024-11-25T07:29:46,981 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:46,981 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:29:46,981 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T07:29:46,983 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732519816983 2024-11-25T07:29:46,983 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T07:29:46,983 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T07:29:46,984 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T07:29:46,984 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T07:29:46,984 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T07:29:46,984 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T07:29:46,984 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:29:46,984 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T07:29:46,984 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:46,984 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T07:29:46,984 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T07:29:46,984 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T07:29:46,985 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T07:29:46,985 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T07:29:46,985 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519786985,5,FailOnTimeoutGroup] 2024-11-25T07:29:46,985 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:46,985 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519786985,5,FailOnTimeoutGroup] 2024-11-25T07:29:46,985 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
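
(Illustrative aside, not part of the captured log.) The DirScanPool/CleanerChore entries above show the master assembling its log-cleaner and HFile-cleaner delegate chains. A minimal Java sketch, assuming the standard hbase.master.logcleaner.plugins and hbase.master.hfilecleaner.plugins keys, of how such chains are normally declared in the site configuration; only classes already named in the log are listed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerPluginsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Comma-separated delegates evaluated by the LogsCleaner chore;
    // classes match the ones the log reports being initialized.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
      + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");

    // Delegates evaluated by the HFileCleaner chore.
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
      + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");

    System.out.println(conf.get("hbase.master.logcleaner.plugins"));
  }
}
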
2024-11-25T07:29:46,985 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T07:29:46,985 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:46,985 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T07:29:46,985 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
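
(Illustrative aside, not part of the captured log.) The FSTableDescriptors entry above records the column-family attributes of the new hbase:meta descriptor (VERSIONS, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks). A hedged sketch of expressing the same attributes through the public client API; hbase:meta itself is created internally by the master, so the class and table names below are hypothetical:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family attributes reported in the log.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    // Hypothetical user table carrying that family.
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(info)
        .build();

    System.out.println(table);
  }
}
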
2024-11-25T07:29:46,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35967 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:29:46,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:29:46,996 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T07:29:46,996 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a 2024-11-25T07:29:47,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35967 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:29:47,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:29:47,008 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:29:47,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:29:47,011 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:29:47,011 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:47,012 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:47,012 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:29:47,013 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(746): ClusterId : 3f9c7dea-9009-4d4c-a343-7dfcbad27125 2024-11-25T07:29:47,013 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T07:29:47,014 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:29:47,014 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:47,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:47,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:29:47,016 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:29:47,016 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:47,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:47,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:29:47,018 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:29:47,018 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:47,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:47,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:29:47,020 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740 2024-11-25T07:29:47,020 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740 2024-11-25T07:29:47,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:29:47,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:29:47,023 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T07:29:47,023 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T07:29:47,023 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T07:29:47,024 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:29:47,027 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:29:47,027 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770925, jitterRate=-0.01971915364265442}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:29:47,028 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T07:29:47,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732519787008Initializing all the Stores at 1732519787009 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519787009Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519787009Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519787009Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519787009Cleaning up temporary data from old regions at 1732519787022 (+13 ms)Region opened successfully at 1732519787028 (+6 ms) 2024-11-25T07:29:47,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:29:47,028 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:29:47,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:29:47,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:29:47,029 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:29:47,029 DEBUG [RS:0;5eb3d201e8c9:44859 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54f27856, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:29:47,029 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:29:47,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519787028Disabling compacts and flushes for region at 1732519787028Disabling writes for close at 1732519787029 (+1 ms)Writing region close event to WAL at 1732519787029Closed at 1732519787029 2024-11-25T07:29:47,031 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:29:47,031 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T07:29:47,031 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T07:29:47,032 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:29:47,034 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T07:29:47,078 DEBUG [RS:0;5eb3d201e8c9:44859 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5eb3d201e8c9:44859 2024-11-25T07:29:47,078 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T07:29:47,078 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T07:29:47,078 DEBUG [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T07:29:47,079 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(2659): reportForDuty to master=5eb3d201e8c9,41597,1732519786338 with port=44859, startcode=1732519786393 2024-11-25T07:29:47,080 DEBUG [RS:0;5eb3d201e8c9:44859 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T07:29:47,082 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48759, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T07:29:47,082 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41597 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5eb3d201e8c9,44859,1732519786393 2024-11-25T07:29:47,083 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41597 {}] master.ServerManager(517): Registering regionserver=5eb3d201e8c9,44859,1732519786393 2024-11-25T07:29:47,085 DEBUG [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a 2024-11-25T07:29:47,085 DEBUG [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34285 2024-11-25T07:29:47,085 DEBUG [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T07:29:47,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:29:47,087 DEBUG [RS:0;5eb3d201e8c9:44859 {}] zookeeper.ZKUtil(111): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5eb3d201e8c9,44859,1732519786393 2024-11-25T07:29:47,087 WARN [RS:0;5eb3d201e8c9:44859 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T07:29:47,087 INFO [RS:0;5eb3d201e8c9:44859 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:29:47,087 DEBUG [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393 2024-11-25T07:29:47,088 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5eb3d201e8c9,44859,1732519786393] 2024-11-25T07:29:47,092 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T07:29:47,094 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T07:29:47,094 INFO [RS:0;5eb3d201e8c9:44859 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T07:29:47,094 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T07:29:47,095 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T07:29:47,096 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T07:29:47,096 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,096 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,096 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,096 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,096 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,096 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,096 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:29:47,096 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,096 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,096 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,096 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,097 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,097 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,097 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:29:47,097 DEBUG [RS:0;5eb3d201e8c9:44859 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:29:47,100 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T07:29:47,101 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,101 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,101 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,101 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,101 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,44859,1732519786393-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:29:47,115 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T07:29:47,115 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,44859,1732519786393-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,116 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,116 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.Replication(171): 5eb3d201e8c9,44859,1732519786393 started 2024-11-25T07:29:47,130 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,130 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(1482): Serving as 5eb3d201e8c9,44859,1732519786393, RpcServer on 5eb3d201e8c9/172.17.0.2:44859, sessionid=0x1014e07e1e10001 2024-11-25T07:29:47,130 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T07:29:47,130 DEBUG [RS:0;5eb3d201e8c9:44859 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5eb3d201e8c9,44859,1732519786393 2024-11-25T07:29:47,130 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,44859,1732519786393' 2024-11-25T07:29:47,130 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T07:29:47,131 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T07:29:47,131 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T07:29:47,131 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T07:29:47,131 DEBUG [RS:0;5eb3d201e8c9:44859 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5eb3d201e8c9,44859,1732519786393 2024-11-25T07:29:47,131 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,44859,1732519786393' 2024-11-25T07:29:47,131 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T07:29:47,132 DEBUG 
[RS:0;5eb3d201e8c9:44859 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T07:29:47,132 DEBUG [RS:0;5eb3d201e8c9:44859 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T07:29:47,132 INFO [RS:0;5eb3d201e8c9:44859 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T07:29:47,132 INFO [RS:0;5eb3d201e8c9:44859 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T07:29:47,216 WARN [5eb3d201e8c9:41597 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T07:29:47,235 INFO [RS:0;5eb3d201e8c9:44859 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C44859%2C1732519786393, suffix=, logDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393, archiveDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs, maxLogs=32 2024-11-25T07:29:47,236 INFO [RS:0;5eb3d201e8c9:44859 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 2024-11-25T07:29:47,243 INFO [RS:0;5eb3d201e8c9:44859 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 2024-11-25T07:29:47,250 DEBUG [RS:0;5eb3d201e8c9:44859 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35341:35341),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-25T07:29:47,467 DEBUG [5eb3d201e8c9:41597 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T07:29:47,467 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5eb3d201e8c9,44859,1732519786393 2024-11-25T07:29:47,469 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,44859,1732519786393, state=OPENING 2024-11-25T07:29:47,470 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T07:29:47,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:47,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:29:47,473 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:29:47,473 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:29:47,473 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:29:47,473 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,44859,1732519786393}] 2024-11-25T07:29:47,627 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T07:29:47,629 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43687, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T07:29:47,633 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T07:29:47,633 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:29:47,634 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C44859%2C1732519786393.meta, suffix=.meta, logDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393, archiveDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs, maxLogs=32 2024-11-25T07:29:47,635 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta 2024-11-25T07:29:47,640 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta 2024-11-25T07:29:47,641 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35341:35341),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-25T07:29:47,642 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:29:47,642 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T07:29:47,642 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T07:29:47,642 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
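
(Illustrative aside, not part of the captured log.) The AbstractFSWAL entries above report the WAL figures in effect for this region server: blocksize=256 MB, rollsize=128 MB, maxLogs=32, with the FSHLogProvider. A sketch, assuming the usual configuration keys for these values (key names are an assumption, not taken from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Assumed keys matching the logged WAL figures: 256 MB block size,
    // roll at 0.5 * blocksize = 128 MB, at most 32 WALs, FSHLog provider.
    conf.set("hbase.wal.provider", "filesystem");
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    conf.setInt("hbase.regionserver.maxlogs", 32);

    long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("WAL roll size ~ " + rollSize);
  }
}
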
2024-11-25T07:29:47,642 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T07:29:47,642 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:29:47,642 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T07:29:47,643 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T07:29:47,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:29:47,645 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:29:47,645 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:47,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:47,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:29:47,646 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:29:47,646 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:47,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:47,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:29:47,647 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:29:47,647 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:47,648 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:29:47,648 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:29:47,649 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:29:47,649 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:47,649 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
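
(Illustrative aside, not part of the captured log.) The CompactionConfiguration lines repeated per column family above report minCompactSize 128 MB, files [3,10), ratio 1.2, off-peak ratio 5.0, throttle point 2684354560, major period 604800000 ms and jitter 0.5. A hedged sketch of the standard hbase-site.xml keys that typically produce those values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Values mirroring the CompactionConfiguration lines in the log.
    conf.setInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);           // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);   // off-peak ratio
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L); // throttle point
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);      // major period, 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);    // major jitter

    System.out.println(conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
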
2024-11-25T07:29:47,649 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:29:47,650 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740 2024-11-25T07:29:47,651 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740 2024-11-25T07:29:47,653 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:29:47,653 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:29:47,653 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T07:29:47,654 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:29:47,655 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821911, jitterRate=0.04511447250843048}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:29:47,655 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T07:29:47,656 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732519787643Writing region info on filesystem at 1732519787643Initializing all the Stores at 1732519787644 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519787644Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519787644Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519787644Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519787644Cleaning up temporary data from old regions at 1732519787653 (+9 ms)Running coprocessor post-open hooks at 1732519787655 (+2 ms)Region opened successfully at 1732519787656 (+1 ms) 2024-11-25T07:29:47,657 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732519787626 2024-11-25T07:29:47,660 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T07:29:47,660 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T07:29:47,661 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5eb3d201e8c9,44859,1732519786393 2024-11-25T07:29:47,662 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,44859,1732519786393, state=OPEN 2024-11-25T07:29:47,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:29:47,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:29:47,667 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,44859,1732519786393 2024-11-25T07:29:47,667 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:29:47,667 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:29:47,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T07:29:47,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,44859,1732519786393 in 194 msec 2024-11-25T07:29:47,673 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T07:29:47,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 639 msec 2024-11-25T07:29:47,675 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:29:47,675 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T07:29:47,676 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:29:47,676 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,44859,1732519786393, seqNum=-1] 2024-11-25T07:29:47,677 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:29:47,678 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43893, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:29:47,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 705 msec 2024-11-25T07:29:47,684 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732519787684, completionTime=-1 2024-11-25T07:29:47,684 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T07:29:47,685 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T07:29:47,686 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T07:29:47,686 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732519847686 2024-11-25T07:29:47,686 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732519907686 2024-11-25T07:29:47,686 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-25T07:29:47,687 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41597,1732519786338-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,687 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41597,1732519786338-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,687 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41597,1732519786338-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,687 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5eb3d201e8c9:41597, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T07:29:47,687 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,687 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,689 DEBUG [master/5eb3d201e8c9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T07:29:47,691 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.258sec 2024-11-25T07:29:47,691 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T07:29:47,691 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T07:29:47,691 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T07:29:47,691 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T07:29:47,691 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T07:29:47,691 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41597,1732519786338-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:29:47,691 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41597,1732519786338-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T07:29:47,693 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T07:29:47,693 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T07:29:47,693 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41597,1732519786338-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T07:29:47,713 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a9565fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:29:47,713 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5eb3d201e8c9,41597,-1 for getting cluster id 2024-11-25T07:29:47,714 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T07:29:47,715 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3f9c7dea-9009-4d4c-a343-7dfcbad27125' 2024-11-25T07:29:47,716 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T07:29:47,716 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3f9c7dea-9009-4d4c-a343-7dfcbad27125" 2024-11-25T07:29:47,716 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6927e468, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:29:47,716 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5eb3d201e8c9,41597,-1] 2024-11-25T07:29:47,717 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T07:29:47,717 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:29:47,718 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54598, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T07:29:47,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@781791f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:29:47,719 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:29:47,720 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,44859,1732519786393, seqNum=-1] 2024-11-25T07:29:47,721 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:29:47,722 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58692, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:29:47,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5eb3d201e8c9,41597,1732519786338 2024-11-25T07:29:47,724 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:47,728 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T07:29:47,746 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:29:47,746 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:47,746 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:47,746 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:29:47,746 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:29:47,746 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:29:47,746 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T07:29:47,747 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:29:47,747 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44807 2024-11-25T07:29:47,749 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44807 connecting to ZooKeeper ensemble=127.0.0.1:56970 2024-11-25T07:29:47,749 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:47,751 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:29:47,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:448070x0, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:29:47,755 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:448070x0, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-25T07:29:47,755 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-25T07:29:47,755 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44807-0x1014e07e1e10002 connected 2024-11-25T07:29:47,756 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T07:29:47,757 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T07:29:47,757 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:44807-0x1014e07e1e10002, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T07:29:47,759 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44807-0x1014e07e1e10002, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:29:47,759 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44807 2024-11-25T07:29:47,759 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44807 2024-11-25T07:29:47,759 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44807 2024-11-25T07:29:47,760 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44807 2024-11-25T07:29:47,760 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44807 2024-11-25T07:29:47,761 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer(746): ClusterId : 3f9c7dea-9009-4d4c-a343-7dfcbad27125 2024-11-25T07:29:47,761 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T07:29:47,763 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T07:29:47,763 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T07:29:47,765 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T07:29:47,766 DEBUG [RS:1;5eb3d201e8c9:44807 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d9472a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:29:47,778 DEBUG [RS:1;5eb3d201e8c9:44807 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;5eb3d201e8c9:44807 2024-11-25T07:29:47,778 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T07:29:47,779 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T07:29:47,779 DEBUG [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer(832): About to register with Master. 
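
The "Minicluster is up" entry above and the surrounding RS:1 startup entries are produced by the test harness bringing up an in-process cluster and then adding a second region server. A minimal sketch of that pattern, assuming HBaseTestingUtil (named in these logs) exposes the same startMiniCluster/getMiniHBaseCluster helpers as the older HBaseTestingUtility; the class name and argument values here are illustrative, not taken from the test:

import org.apache.hadoop.hbase.HBaseTestingUtil;

public final class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Starts HDFS, ZooKeeper, one master and one region server in-process (assumed overload).
    util.startMiniCluster(1);
    try {
      // Add a second region server, as the RS:1 startup entries above suggest.
      util.getMiniHBaseCluster().startRegionServer();
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
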
2024-11-25T07:29:47,779 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer(2659): reportForDuty to master=5eb3d201e8c9,41597,1732519786338 with port=44807, startcode=1732519787746 2024-11-25T07:29:47,780 DEBUG [RS:1;5eb3d201e8c9:44807 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T07:29:47,781 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49943, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T07:29:47,782 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41597 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5eb3d201e8c9,44807,1732519787746 2024-11-25T07:29:47,782 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41597 {}] master.ServerManager(517): Registering regionserver=5eb3d201e8c9,44807,1732519787746 2024-11-25T07:29:47,784 DEBUG [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a 2024-11-25T07:29:47,784 DEBUG [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34285 2024-11-25T07:29:47,784 DEBUG [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T07:29:47,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:29:47,788 DEBUG [RS:1;5eb3d201e8c9:44807 {}] zookeeper.ZKUtil(111): regionserver:44807-0x1014e07e1e10002, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5eb3d201e8c9,44807,1732519787746 2024-11-25T07:29:47,788 WARN [RS:1;5eb3d201e8c9:44807 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T07:29:47,788 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5eb3d201e8c9,44807,1732519787746] 2024-11-25T07:29:47,788 INFO [RS:1;5eb3d201e8c9:44807 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:29:47,788 DEBUG [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746 2024-11-25T07:29:47,791 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T07:29:47,793 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T07:29:47,796 INFO [RS:1;5eb3d201e8c9:44807 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T07:29:47,796 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T07:29:47,797 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T07:29:47,798 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T07:29:47,798 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,798 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,798 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,798 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,798 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,798 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,798 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:29:47,798 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,798 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,799 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,799 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,799 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,799 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:29:47,799 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:29:47,799 DEBUG [RS:1;5eb3d201e8c9:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:29:47,799 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T07:29:47,799 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,799 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,800 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,800 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,800 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,44807,1732519787746-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:29:47,822 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T07:29:47,822 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,44807,1732519787746-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,822 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,822 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.Replication(171): 5eb3d201e8c9,44807,1732519787746 started 2024-11-25T07:29:47,840 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:29:47,840 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer(1482): Serving as 5eb3d201e8c9,44807,1732519787746, RpcServer on 5eb3d201e8c9/172.17.0.2:44807, sessionid=0x1014e07e1e10002 2024-11-25T07:29:47,841 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T07:29:47,841 DEBUG [RS:1;5eb3d201e8c9:44807 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5eb3d201e8c9,44807,1732519787746 2024-11-25T07:29:47,841 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,44807,1732519787746' 2024-11-25T07:29:47,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;5eb3d201e8c9:44807,5,FailOnTimeoutGroup] 2024-11-25T07:29:47,841 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T07:29:47,841 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-25T07:29:47,841 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T07:29:47,841 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T07:29:47,842 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T07:29:47,842 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T07:29:47,842 DEBUG [RS:1;5eb3d201e8c9:44807 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
5eb3d201e8c9,44807,1732519787746 2024-11-25T07:29:47,842 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,44807,1732519787746' 2024-11-25T07:29:47,842 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T07:29:47,842 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T07:29:47,843 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 5eb3d201e8c9,41597,1732519786338 2024-11-25T07:29:47,843 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@18f78161 2024-11-25T07:29:47,843 DEBUG [RS:1;5eb3d201e8c9:44807 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T07:29:47,843 INFO [RS:1;5eb3d201e8c9:44807 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T07:29:47,843 INFO [RS:1;5eb3d201e8c9:44807 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T07:29:47,843 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T07:29:47,845 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54608, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T07:29:47,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T07:29:47,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
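
The two TableDescriptorChecker warnings above, and the create request echoed in the next entry, come from a table descriptor that deliberately uses tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) values so the test can force frequent flushes and log rolls. A minimal sketch of building such a descriptor with the standard client API; this is illustrative only (the class and method names are hypothetical, and 'admin' is assumed to come from Connection.getAdmin()), not the test's actual code:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestTableSketch {
  static void createTestTable(Admin admin) throws IOException {
    TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        // Single 'info' family with one version, matching the descriptor echoed in the next entry.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1).build())
        // Deliberately tiny limits; these are what trigger the TableDescriptorChecker warnings above.
        .setMaxFileSize(786432L)
        .setMemStoreFlushSize(8192L)
        .build();
    admin.createTable(desc);
  }
}
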
2024-11-25T07:29:47,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:29:47,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T07:29:47,850 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T07:29:47,850 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:47,850 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-25T07:29:47,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T07:29:47,851 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T07:29:47,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741835_1011 (size=393) 2024-11-25T07:29:47,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35967 is added to blk_1073741835_1011 (size=393) 2024-11-25T07:29:47,861 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b9d5b52402b54731b92fe45510c70973, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a 2024-11-25T07:29:47,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741836_1012 (size=76) 2024-11-25T07:29:47,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35967 is added to blk_1073741836_1012 (size=76) 2024-11-25T07:29:47,870 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:29:47,871 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing b9d5b52402b54731b92fe45510c70973, disabling compactions & flushes 2024-11-25T07:29:47,871 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:29:47,871 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:29:47,871 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. after waiting 0 ms 2024-11-25T07:29:47,871 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:29:47,871 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:29:47,871 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for b9d5b52402b54731b92fe45510c70973: Waiting for close lock at 1732519787871Disabling compacts and flushes for region at 1732519787871Disabling writes for close at 1732519787871Writing region close event to WAL at 1732519787871Closed at 1732519787871 2024-11-25T07:29:47,873 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T07:29:47,873 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732519787873"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732519787873"}]},"ts":"1732519787873"} 2024-11-25T07:29:47,876 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-25T07:29:47,878 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T07:29:47,878 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732519787878"}]},"ts":"1732519787878"} 2024-11-25T07:29:47,881 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-25T07:29:47,881 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b9d5b52402b54731b92fe45510c70973, ASSIGN}] 2024-11-25T07:29:47,883 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b9d5b52402b54731b92fe45510c70973, ASSIGN 2024-11-25T07:29:47,884 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b9d5b52402b54731b92fe45510c70973, ASSIGN; state=OFFLINE, location=5eb3d201e8c9,44859,1732519786393; forceNewPlan=false, retain=false 2024-11-25T07:29:47,945 INFO [RS:1;5eb3d201e8c9:44807 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C44807%2C1732519787746, suffix=, logDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746, archiveDir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs, maxLogs=32 2024-11-25T07:29:47,946 INFO [RS:1;5eb3d201e8c9:44807 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 2024-11-25T07:29:47,954 INFO [RS:1;5eb3d201e8c9:44807 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 2024-11-25T07:29:47,955 DEBUG [RS:1;5eb3d201e8c9:44807 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35341:35341),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-25T07:29:48,035 INFO [5eb3d201e8c9:41597 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
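
The ASSIGN subprocedure (pid=5) initialized above and the OpenRegionProcedure (pid=6) in the entries that follow carry the new region through OPENING to OPEN on 5eb3d201e8c9,44859. From the client side a test normally just blocks until the table is usable before writing; a minimal sketch, assuming the HBaseTestingUtil wait helper and connection accessor behave like their HBaseTestingUtility counterparts (the class name and 'util' handle below are hypothetical):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class WaitForAssignmentSketch {
  static void waitForTable(HBaseTestingUtil util) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    // Blocks until every region of the table is assigned and open.
    util.waitTableAvailable(name);
    try (RegionLocator locator = util.getConnection().getRegionLocator(name)) {
      // Should report a single region hosted on the server chosen by the ASSIGN procedure above.
      locator.getAllRegionLocations().forEach(System.out::println);
    }
  }
}
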
2024-11-25T07:29:48,036 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b9d5b52402b54731b92fe45510c70973, regionState=OPENING, regionLocation=5eb3d201e8c9,44859,1732519786393 2024-11-25T07:29:48,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b9d5b52402b54731b92fe45510c70973, ASSIGN because future has completed 2024-11-25T07:29:48,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b9d5b52402b54731b92fe45510c70973, server=5eb3d201e8c9,44859,1732519786393}] 2024-11-25T07:29:48,198 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:29:48,198 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b9d5b52402b54731b92fe45510c70973, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:29:48,198 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath b9d5b52402b54731b92fe45510c70973 2024-11-25T07:29:48,199 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:29:48,199 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b9d5b52402b54731b92fe45510c70973 2024-11-25T07:29:48,199 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b9d5b52402b54731b92fe45510c70973 2024-11-25T07:29:48,200 INFO [StoreOpener-b9d5b52402b54731b92fe45510c70973-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b9d5b52402b54731b92fe45510c70973 2024-11-25T07:29:48,202 INFO [StoreOpener-b9d5b52402b54731b92fe45510c70973-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b9d5b52402b54731b92fe45510c70973 columnFamilyName info 2024-11-25T07:29:48,202 DEBUG [StoreOpener-b9d5b52402b54731b92fe45510c70973-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:29:48,203 INFO [StoreOpener-b9d5b52402b54731b92fe45510c70973-1 {}] regionserver.HStore(327): Store=b9d5b52402b54731b92fe45510c70973/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:29:48,203 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b9d5b52402b54731b92fe45510c70973 2024-11-25T07:29:48,204 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973 2024-11-25T07:29:48,204 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973 2024-11-25T07:29:48,205 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b9d5b52402b54731b92fe45510c70973 2024-11-25T07:29:48,205 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b9d5b52402b54731b92fe45510c70973 2024-11-25T07:29:48,206 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b9d5b52402b54731b92fe45510c70973 2024-11-25T07:29:48,209 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:29:48,210 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b9d5b52402b54731b92fe45510c70973; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788217, jitterRate=0.002269834280014038}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T07:29:48,210 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b9d5b52402b54731b92fe45510c70973 2024-11-25T07:29:48,210 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b9d5b52402b54731b92fe45510c70973: Running coprocessor pre-open hook at 1732519788199Writing region info on filesystem at 1732519788199Initializing all the Stores at 1732519788200 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519788200Cleaning up temporary data from old regions at 1732519788205 (+5 ms)Running coprocessor post-open hooks at 1732519788210 (+5 ms)Region opened successfully at 1732519788210 2024-11-25T07:29:48,211 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973., pid=6, masterSystemTime=1732519788193 2024-11-25T07:29:48,214 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:29:48,214 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:29:48,215 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b9d5b52402b54731b92fe45510c70973, regionState=OPEN, openSeqNum=2, regionLocation=5eb3d201e8c9,44859,1732519786393 2024-11-25T07:29:48,218 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b9d5b52402b54731b92fe45510c70973, server=5eb3d201e8c9,44859,1732519786393 because future has completed 2024-11-25T07:29:48,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T07:29:48,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b9d5b52402b54731b92fe45510c70973, server=5eb3d201e8c9,44859,1732519786393 in 180 msec 2024-11-25T07:29:48,226 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T07:29:48,226 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b9d5b52402b54731b92fe45510c70973, ASSIGN in 342 msec 2024-11-25T07:29:48,227 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T07:29:48,228 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732519788227"}]},"ts":"1732519788227"} 2024-11-25T07:29:48,230 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-25T07:29:48,232 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T07:29:48,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 386 msec 2024-11-25T07:29:53,144 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T07:29:53,146 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:29:53,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:29:53,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:29:53,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:29:53,176 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-25T07:29:55,761 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T07:29:55,762 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T07:29:55,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T07:29:55,762 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-25T07:29:55,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:29:55,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T07:29:55,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-25T07:29:55,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-25T07:29:57,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T07:29:57,928 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: 
CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-25T07:29:57,928 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-25T07:29:57,932 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T07:29:57,932 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:29:57,946 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:29:57,949 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:29:57,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:29:57,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:29:57,950 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:29:57,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@627eec4d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:29:57,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d7890e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:29:58,066 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74c44b7d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/java.io.tmpdir/jetty-localhost-34945-hadoop-hdfs-3_4_1-tests_jar-_-any-12586376320465476359/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:58,066 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@167a7fde{HTTP/1.1, (http/1.1)}{localhost:34945} 2024-11-25T07:29:58,066 INFO [Time-limited test {}] server.Server(415): Started @117913ms 2024-11-25T07:29:58,068 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:29:58,112 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:29:58,116 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:29:58,116 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:29:58,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:29:58,117 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:29:58,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cbb1003{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:29:58,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fb911ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:29:58,176 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6/current/BP-673609853-172.17.0.2-1732519785461/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:58,176 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5/current/BP-673609853-172.17.0.2-1732519785461/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:58,193 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T07:29:58,195 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x111b8685f8e327a2 with lease ID 0x4f55d6bdd8ce110f: Processing first storage report for DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9 from datanode DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461) 2024-11-25T07:29:58,195 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x111b8685f8e327a2 with lease ID 0x4f55d6bdd8ce110f: from storage DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9 node DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:58,196 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x111b8685f8e327a2 with lease ID 0x4f55d6bdd8ce110f: Processing first storage report for DS-2e2aedc2-50d0-4d29-a209-cd87561ca7fd from datanode DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461) 2024-11-25T07:29:58,196 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x111b8685f8e327a2 with lease ID 0x4f55d6bdd8ce110f: from storage DS-2e2aedc2-50d0-4d29-a209-cd87561ca7fd node DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:58,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76c9fd0f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/java.io.tmpdir/jetty-localhost-39093-hadoop-hdfs-3_4_1-tests_jar-_-any-11682317661700521445/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:58,235 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1dc59954{HTTP/1.1, (http/1.1)}{localhost:39093} 2024-11-25T07:29:58,235 INFO [Time-limited test {}] server.Server(415): Started @118082ms 2024-11-25T07:29:58,237 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:29:58,273 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:29:58,276 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:29:58,277 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:29:58,277 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:29:58,277 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:29:58,278 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39615ad8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:29:58,278 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64b86931{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:29:58,345 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data7/current/BP-673609853-172.17.0.2-1732519785461/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:58,345 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data8/current/BP-673609853-172.17.0.2-1732519785461/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:58,379 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T07:29:58,382 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfed22aa62acb5bb1 with lease ID 0x4f55d6bdd8ce1110: Processing first storage report for DS-8bdf00cf-1033-447a-adca-1e451258426d from datanode DatanodeRegistration(127.0.0.1:44733, datanodeUuid=a48c05c8-ea25-4288-ab5e-77a9371604cc, infoPort=45841, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461) 2024-11-25T07:29:58,382 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfed22aa62acb5bb1 with lease ID 0x4f55d6bdd8ce1110: from storage DS-8bdf00cf-1033-447a-adca-1e451258426d node DatanodeRegistration(127.0.0.1:44733, datanodeUuid=a48c05c8-ea25-4288-ab5e-77a9371604cc, infoPort=45841, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:58,382 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfed22aa62acb5bb1 with lease ID 0x4f55d6bdd8ce1110: Processing first storage report for DS-79391e56-b93e-4d51-9b14-86c06478a72b from datanode DatanodeRegistration(127.0.0.1:44733, datanodeUuid=a48c05c8-ea25-4288-ab5e-77a9371604cc, infoPort=45841, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461) 2024-11-25T07:29:58,382 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfed22aa62acb5bb1 with lease ID 0x4f55d6bdd8ce1110: from storage DS-79391e56-b93e-4d51-9b14-86c06478a72b node DatanodeRegistration(127.0.0.1:44733, datanodeUuid=a48c05c8-ea25-4288-ab5e-77a9371604cc, infoPort=45841, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:58,408 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ad6c277{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/java.io.tmpdir/jetty-localhost-46043-hadoop-hdfs-3_4_1-tests_jar-_-any-9517171101885300901/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:58,409 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e1eaefc{HTTP/1.1, (http/1.1)}{localhost:46043} 2024-11-25T07:29:58,409 INFO [Time-limited test {}] server.Server(415): Started @118256ms 2024-11-25T07:29:58,410 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T07:29:58,505 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data9/current/BP-673609853-172.17.0.2-1732519785461/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:58,505 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data10/current/BP-673609853-172.17.0.2-1732519785461/current, will proceed with Du for space computation calculation, 2024-11-25T07:29:58,522 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T07:29:58,525 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfe284bbd1f7ba40a with lease ID 0x4f55d6bdd8ce1111: Processing first storage report for DS-7e215f82-20f1-4a8c-8186-1871d004bbf5 from datanode DatanodeRegistration(127.0.0.1:43413, datanodeUuid=cb75ba5b-6376-4c7f-ad43-1a827f1658fd, infoPort=44301, infoSecurePort=0, ipcPort=34499, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461) 2024-11-25T07:29:58,525 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfe284bbd1f7ba40a with lease ID 0x4f55d6bdd8ce1111: from storage DS-7e215f82-20f1-4a8c-8186-1871d004bbf5 node DatanodeRegistration(127.0.0.1:43413, datanodeUuid=cb75ba5b-6376-4c7f-ad43-1a827f1658fd, infoPort=44301, infoSecurePort=0, ipcPort=34499, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:58,525 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfe284bbd1f7ba40a with lease ID 0x4f55d6bdd8ce1111: Processing first storage report for DS-09f9281d-855c-4dd5-ad74-3a690e1b6a28 from datanode DatanodeRegistration(127.0.0.1:43413, datanodeUuid=cb75ba5b-6376-4c7f-ad43-1a827f1658fd, infoPort=44301, infoSecurePort=0, ipcPort=34499, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461) 2024-11-25T07:29:58,525 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfe284bbd1f7ba40a with lease ID 0x4f55d6bdd8ce1111: from storage DS-09f9281d-855c-4dd5-ad74-3a690e1b6a28 node DatanodeRegistration(127.0.0.1:43413, datanodeUuid=cb75ba5b-6376-4c7f-ad43-1a827f1658fd, infoPort=44301, infoSecurePort=0, ipcPort=34499, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:29:58,530 WARN [ResponseProcessor for block BP-673609853-172.17.0.2-1732519785461:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-673609853-172.17.0.2-1732519785461:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:29:58,530 WARN [ResponseProcessor for block BP-673609853-172.17.0.2-1732519785461:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-673609853-172.17.0.2-1732519785461:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:29:58,530 WARN [ResponseProcessor for block BP-673609853-172.17.0.2-1732519785461:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-673609853-172.17.0.2-1732519785461:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:29:58,531 WARN [DataStreamer for file /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 block BP-673609853-172.17.0.2-1732519785461:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 2024-11-25T07:29:58,531 WARN [DataStreamer for file /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 block BP-673609853-172.17.0.2-1732519785461:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 2024-11-25T07:29:58,531 WARN [ResponseProcessor for block BP-673609853-172.17.0.2-1732519785461:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-673609853-172.17.0.2-1732519785461:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-673609853-172.17.0.2-1732519785461:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:29:58,531 WARN [DataStreamer for file /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 block BP-673609853-172.17.0.2-1732519785461:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK], DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 2024-11-25T07:29:58,532 WARN [PacketResponder: BP-673609853-172.17.0.2-1732519785461:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35967] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:29:58,532 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e64a045{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:58,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-33197741_22 at /127.0.0.1:45706 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45706 dst: /127.0.0.1:35967 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:29:58,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:45670 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45670 dst: /127.0.0.1:35967 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:29:58,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-33197741_22 at /127.0.0.1:36572 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:32821:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36572 dst: /127.0.0.1:32821 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:29:58,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1870597311_22 at /127.0.0.1:45644 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45644 dst: /127.0.0.1:35967 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:29:58,533 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1495e1af{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:29:58,533 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:29:58,533 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@372d60ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:29:58,533 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51561b8f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,STOPPED} 2024-11-25T07:29:58,534 WARN [DataStreamer for file /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta block BP-673609853-172.17.0.2-1732519785461:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 2024-11-25T07:29:58,534 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36534 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32821:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36534 dst: /127.0.0.1:32821 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:29:58,534 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36548 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:32821:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36548 dst: /127.0.0.1:32821 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:29:58,534 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:29:58,534 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1870597311_22 at /127.0.0.1:36518 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:32821:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36518 dst: /127.0.0.1:32821 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:29:58,534 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:29:58,535 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-673609853-172.17.0.2-1732519785461 (Datanode Uuid 0067738f-ec80-43c7-978e-8f5a2b9b9e66) service to localhost/127.0.0.1:34285 2024-11-25T07:29:58,535 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:29:58,535 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data3/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:58,535 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data4/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:58,536 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:29:58,534 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:45684 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45684 dst: /127.0.0.1:35967 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:29:58,538 WARN [DataStreamer for file /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 block BP-673609853-172.17.0.2-1732519785461:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:29:58,538 WARN [DataStreamer for file /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 block BP-673609853-172.17.0.2-1732519785461:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:29:58,538 WARN [DataStreamer for file /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta block BP-673609853-172.17.0.2-1732519785461:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:29:58,538 WARN [DataStreamer for file /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 block BP-673609853-172.17.0.2-1732519785461:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:29:58,541 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de86657{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:29:58,542 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6787773a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:29:58,542 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:29:58,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a107105{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:29:58,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65dec1b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,STOPPED} 2024-11-25T07:29:58,543 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:29:58,544 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-673609853-172.17.0.2-1732519785461 (Datanode Uuid fd7732db-5526-486e-931c-c37b0c35cc54) service to localhost/127.0.0.1:34285 2024-11-25T07:29:58,544 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data1/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:58,544 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data2/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:29:58,544 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:29:58,544 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:29:58,545 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:29:58,549 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973., hostname=5eb3d201e8c9,44859,1732519786393, seqNum=2] 2024-11-25T07:29:58,551 ERROR [FSHLog-0-hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a-prefix:5eb3d201e8c9,44859,1732519786393 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:29:58,551 WARN [FSHLog-0-hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a-prefix:5eb3d201e8c9,44859,1732519786393 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:29:58,551 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:29:58,551 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C44859%2C1732519786393:(num 1732519787236) roll requested 2024-11-25T07:29:58,551 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C44859%2C1732519786393.1732519798551 2024-11-25T07:29:58,554 WARN [Thread-900 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:29:58,554 WARN [Thread-900 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 2024-11-25T07:29:58,554 WARN [Thread-900 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741838_1018 2024-11-25T07:29:58,557 WARN [Thread-900 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK] 2024-11-25T07:29:58,562 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:58,563 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:58,563 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:58,563 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:58,563 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:29:58,563 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519798551 2024-11-25T07:29:58,563 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:29:58,564 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:29:58,564 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45841:45841),(127.0.0.1/127.0.0.1:44301:44301)] 2024-11-25T07:29:58,564 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 is not closed yet, will try archiving it next time 2024-11-25T07:29:58,565 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-25T07:29:58,565 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-25T07:29:58,565 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 2024-11-25T07:29:58,568 WARN [IPC Server handler 2 on default port 34285 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-25T07:29:58,571 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 after 4ms 2024-11-25T07:29:59,800 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:00,477 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:00,564 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:00,565 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519798551 2024-11-25T07:30:00,566 WARN [ResponseProcessor for block BP-673609853-172.17.0.2-1732519785461:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-673609853-172.17.0.2-1732519785461:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:00,566 WARN [DataStreamer for file /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519798551 block BP-673609853-172.17.0.2-1732519785461:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 2024-11-25T07:30:00,567 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:51542 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:44733:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51542 dst: /127.0.0.1:44733 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:00,567 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:60646 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:43413:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60646 dst: /127.0.0.1:43413 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:00,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76c9fd0f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:00,570 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1dc59954{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:30:00,570 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:30:00,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fb911ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:30:00,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cbb1003{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,STOPPED} 2024-11-25T07:30:00,571 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:30:00,571 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:30:00,571 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-673609853-172.17.0.2-1732519785461 (Datanode Uuid a48c05c8-ea25-4288-ab5e-77a9371604cc) service to localhost/127.0.0.1:34285 2024-11-25T07:30:00,571 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:30:00,572 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data7/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:00,572 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data8/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:00,572 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:30:01,800 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:02,477 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:02,565 WARN [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]] 2024-11-25T07:30:02,565 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:02,565 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C44859%2C1732519786393:(num 1732519798551) roll requested 2024-11-25T07:30:02,566 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C44859%2C1732519786393.1732519802565 2024-11-25T07:30:02,569 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:02,569 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 
2024-11-25T07:30:02,569 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741840_1022 2024-11-25T07:30:02,570 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:02,572 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 after 4007ms 2024-11-25T07:30:02,572 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35967 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:02,572 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49786 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741841_1023 to mirror 127.0.0.1:35967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:02,573 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 2024-11-25T07:30:02,573 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741841_1023 2024-11-25T07:30:02,573 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49786 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-25T07:30:02,573 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49786 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49786 dst: /127.0.0.1:34615 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:02,573 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK] 2024-11-25T07:30:02,574 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:02,574 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad. 2024-11-25T07:30:02,574 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741842_1024 2024-11-25T07:30:02,575 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK] 2024-11-25T07:30:02,578 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:02,579 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:02,579 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:02,579 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:02,579 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:02,579 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519798551 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519802565 2024-11-25T07:30:02,580 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40375:40375),(127.0.0.1/127.0.0.1:44301:44301)] 2024-11-25T07:30:02,580 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 is not closed yet, will try archiving it next time 2024-11-25T07:30:02,580 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519798551 is not closed yet, will try archiving it next time 2024-11-25T07:30:02,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43413 is added to blk_1073741839_1021 (size=2431) 2024-11-25T07:30:02,582 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T07:30:02,982 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 is not closed yet, will try archiving it next time 2024-11-25T07:30:03,801 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:04,478 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:04,535 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6659517b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43413, datanodeUuid=cb75ba5b-6376-4c7f-ad43-1a827f1658fd, infoPort=44301, infoSecurePort=0, ipcPort=34499, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461):Failed to transfer BP-673609853-172.17.0.2-1732519785461:blk_1073741839_1021 to 127.0.0.1:35967 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:04,580 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:04,585 WARN [ResponseProcessor for block BP-673609853-172.17.0.2-1732519785461:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-673609853-172.17.0.2-1732519785461:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-673609853-172.17.0.2-1732519785461:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:04,586 WARN [DataStreamer for file /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519802565 block BP-673609853-172.17.0.2-1732519785461:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:04,586 WARN [PacketResponder: BP-673609853-172.17.0.2-1732519785461:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43413] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:04,586 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49794 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49794 dst: /127.0.0.1:34615 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:04,587 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:48486 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:43413:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48486 dst: /127.0.0.1:43413 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:04,590 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ad6c277{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:04,590 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e1eaefc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:30:04,590 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:30:04,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64b86931{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:30:04,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39615ad8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,STOPPED} 2024-11-25T07:30:04,592 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:30:04,592 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T07:30:04,592 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:30:04,592 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-673609853-172.17.0.2-1732519785461 (Datanode Uuid cb75ba5b-6376-4c7f-ad43-1a827f1658fd) service to localhost/127.0.0.1:34285 2024-11-25T07:30:04,593 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data9/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:04,593 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data10/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:04,593 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:30:04,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44859 {}] regionserver.HRegion(8855): Flush requested on b9d5b52402b54731b92fe45510c70973 2024-11-25T07:30:04,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b9d5b52402b54731b92fe45510c70973 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T07:30:04,633 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/a1703d8d12df468885273d28547e48f7 is 1080, key is row0002/info:/1732519800574/Put/seqid=0 2024-11-25T07:30:04,636 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:04,636 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad. 2024-11-25T07:30:04,636 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741844_1027 2024-11-25T07:30:04,637 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK] 2024-11-25T07:30:04,638 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:04,638 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 2024-11-25T07:30:04,639 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741845_1028 2024-11-25T07:30:04,640 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK] 2024-11-25T07:30:04,643 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:04,642 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49816 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741846_1029 to mirror 127.0.0.1:43413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:04,643 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:04,643 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741846_1029 2024-11-25T07:30:04,643 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49816 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T07:30:04,643 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49816 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49816 dst: /127.0.0.1:34615 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:04,644 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:04,645 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:04,645 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 2024-11-25T07:30:04,645 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741847_1030 2024-11-25T07:30:04,646 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:04,647 WARN [IPC Server handler 2 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T07:30:04,647 WARN [IPC Server handler 2 on default port 34285 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T07:30:04,647 WARN [IPC Server handler 2 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T07:30:04,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741848_1031 (size=10347) 2024-11-25T07:30:05,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/a1703d8d12df468885273d28547e48f7 2024-11-25T07:30:05,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/a1703d8d12df468885273d28547e48f7 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/a1703d8d12df468885273d28547e48f7 2024-11-25T07:30:05,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/a1703d8d12df468885273d28547e48f7, entries=5, sequenceid=11, filesize=10.1 K 2024-11-25T07:30:05,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for b9d5b52402b54731b92fe45510c70973 in 468ms, sequenceid=11, compaction requested=false 2024-11-25T07:30:05,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b9d5b52402b54731b92fe45510c70973: 2024-11-25T07:30:05,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44859 {}] regionserver.HRegion(8855): Flush requested on b9d5b52402b54731b92fe45510c70973 2024-11-25T07:30:05,232 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b9d5b52402b54731b92fe45510c70973 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-25T07:30:05,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/fee695575c9a468392b18fb4d57a430e is 1080, key is row0007/info:/1732519804606/Put/seqid=0 2024-11-25T07:30:05,238 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:05,239 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:05,239 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741849_1032 2024-11-25T07:30:05,239 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:05,240 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:05,240 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 2024-11-25T07:30:05,240 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741850_1033 2024-11-25T07:30:05,241 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK] 2024-11-25T07:30:05,242 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:05,242 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad. 2024-11-25T07:30:05,242 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741851_1034 2024-11-25T07:30:05,243 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK] 2024-11-25T07:30:05,244 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:05,244 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 2024-11-25T07:30:05,244 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741852_1035 2024-11-25T07:30:05,244 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:05,245 WARN [IPC Server handler 1 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T07:30:05,245 WARN [IPC Server handler 1 on default port 34285 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T07:30:05,245 WARN [IPC Server handler 1 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T07:30:05,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741853_1036 (size=12506) 2024-11-25T07:30:05,249 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/fee695575c9a468392b18fb4d57a430e 2024-11-25T07:30:05,255 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/fee695575c9a468392b18fb4d57a430e as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/fee695575c9a468392b18fb4d57a430e 2024-11-25T07:30:05,260 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/fee695575c9a468392b18fb4d57a430e, entries=7, sequenceid=24, filesize=12.2 K 2024-11-25T07:30:05,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=0 B/0 for b9d5b52402b54731b92fe45510c70973 in 29ms, sequenceid=24, compaction requested=false 2024-11-25T07:30:05,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b9d5b52402b54731b92fe45510c70973: 2024-11-25T07:30:05,261 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-25T07:30:05,261 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:30:05,261 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/fee695575c9a468392b18fb4d57a430e because midkey is the same as first or last row 2024-11-25T07:30:05,801 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:06,478 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:06,581 WARN [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]] 2024-11-25T07:30:06,581 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:06,581 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C44859%2C1732519786393:(num 1732519802565) roll requested 2024-11-25T07:30:06,582 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C44859%2C1732519786393.1732519806581 2024-11-25T07:30:06,588 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:06,588 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 2024-11-25T07:30:06,588 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741854_1037 2024-11-25T07:30:06,589 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:06,590 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:06,591 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 2024-11-25T07:30:06,591 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741855_1038 2024-11-25T07:30:06,592 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK] 2024-11-25T07:30:06,593 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:06,594 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 
2024-11-25T07:30:06,594 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741856_1039 2024-11-25T07:30:06,594 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:06,602 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49848 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741857_1040 to mirror 127.0.0.1:32821 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:06,603 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49848 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-25T07:30:06,603 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49848 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49848 dst: /127.0.0.1:34615 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:06,602 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32821 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:06,603 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad. 2024-11-25T07:30:06,603 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741857_1040 2024-11-25T07:30:06,604 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK] 2024-11-25T07:30:06,605 WARN [IPC Server handler 2 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T07:30:06,605 WARN [IPC Server handler 2 on default port 34285 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T07:30:06,605 WARN [IPC Server handler 2 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T07:30:06,615 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:06,615 
INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:06,615 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:06,615 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:06,615 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:06,616 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519802565 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519806581 2024-11-25T07:30:06,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741843_1026 (size=25992) 2024-11-25T07:30:06,622 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 is not closed yet, will try archiving it next time 2024-11-25T07:30:06,626 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40375:40375)] 2024-11-25T07:30:06,627 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 is not closed yet, will try archiving it next time 2024-11-25T07:30:06,628 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519798551 to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs/5eb3d201e8c9%2C44859%2C1732519786393.1732519798551 2024-11-25T07:30:06,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44859 {}] regionserver.HRegion(8855): Flush requested on b9d5b52402b54731b92fe45510c70973 2024-11-25T07:30:06,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b9d5b52402b54731b92fe45510c70973 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-25T07:30:06,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/70e69983da0a4d898dee8e3ca799206d is 1079, key is tmprow/info:/1732519806654/Put/seqid=0 2024-11-25T07:30:06,662 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:06,663 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK], DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:06,663 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741859_1042 2024-11-25T07:30:06,663 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:06,665 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:06,665 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 2024-11-25T07:30:06,665 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741860_1043 2024-11-25T07:30:06,666 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:06,667 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:06,667 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad. 2024-11-25T07:30:06,667 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741861_1044 2024-11-25T07:30:06,668 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK] 2024-11-25T07:30:06,669 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:06,669 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 
2024-11-25T07:30:06,670 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741862_1045 2024-11-25T07:30:06,670 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK] 2024-11-25T07:30:06,671 WARN [IPC Server handler 4 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T07:30:06,671 WARN [IPC Server handler 4 on default port 34285 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T07:30:06,671 WARN [IPC Server handler 4 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T07:30:06,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741863_1046 (size=6027) 2024-11-25T07:30:07,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/70e69983da0a4d898dee8e3ca799206d 2024-11-25T07:30:07,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/70e69983da0a4d898dee8e3ca799206d as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/70e69983da0a4d898dee8e3ca799206d 2024-11-25T07:30:07,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/70e69983da0a4d898dee8e3ca799206d, entries=1, sequenceid=34, filesize=5.9 K 2024-11-25T07:30:07,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b9d5b52402b54731b92fe45510c70973 in 435ms, sequenceid=34, compaction requested=true 2024-11-25T07:30:07,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
b9d5b52402b54731b92fe45510c70973: 2024-11-25T07:30:07,092 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-25T07:30:07,092 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:30:07,092 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/fee695575c9a468392b18fb4d57a430e because midkey is the same as first or last row 2024-11-25T07:30:07,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9d5b52402b54731b92fe45510c70973:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T07:30:07,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:30:07,093 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T07:30:07,094 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T07:30:07,094 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HStore(1541): b9d5b52402b54731b92fe45510c70973/info is initiating minor compaction (all files) 2024-11-25T07:30:07,094 INFO [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b9d5b52402b54731b92fe45510c70973/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 
2024-11-25T07:30:07,094 INFO [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/a1703d8d12df468885273d28547e48f7, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/fee695575c9a468392b18fb4d57a430e, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/70e69983da0a4d898dee8e3ca799206d] into tmpdir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp, totalSize=28.2 K 2024-11-25T07:30:07,095 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] compactions.Compactor(225): Compacting a1703d8d12df468885273d28547e48f7, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732519800574 2024-11-25T07:30:07,095 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] compactions.Compactor(225): Compacting fee695575c9a468392b18fb4d57a430e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732519804606 2024-11-25T07:30:07,096 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] compactions.Compactor(225): Compacting 70e69983da0a4d898dee8e3ca799206d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732519806654 2024-11-25T07:30:07,112 INFO [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9d5b52402b54731b92fe45510c70973#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:30:07,112 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/7de6f7bb5fc848ddb1553bcd31ee6a87 is 1080, key is row0002/info:/1732519800574/Put/seqid=0 2024-11-25T07:30:07,114 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:07,115 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 2024-11-25T07:30:07,115 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741864_1047 2024-11-25T07:30:07,115 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:07,118 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49888 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741865_1048 to mirror 127.0.0.1:32821 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:07,118 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32821 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:07,118 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49888 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T07:30:07,118 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad. 2024-11-25T07:30:07,118 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741865_1048 2024-11-25T07:30:07,118 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49888 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49888 dst: /127.0.0.1:34615 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:07,119 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK] 2024-11-25T07:30:07,121 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:07,121 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49896 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741866_1049 to mirror 127.0.0.1:43413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:07,121 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:07,121 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741866_1049 2024-11-25T07:30:07,121 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49896 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T07:30:07,121 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49896 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49896 dst: /127.0.0.1:34615 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:07,122 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:07,123 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:07,123 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 
2024-11-25T07:30:07,124 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741867_1050 2024-11-25T07:30:07,124 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK] 2024-11-25T07:30:07,125 WARN [IPC Server handler 3 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T07:30:07,125 WARN [IPC Server handler 3 on default port 34285 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T07:30:07,125 WARN [IPC Server handler 3 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T07:30:07,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741868_1051 (size=17994) 2024-11-25T07:30:07,198 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6f5ef6a1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461):Failed to transfer BP-673609853-172.17.0.2-1732519785461:blk_1073741848_1031 to 127.0.0.1:35967 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:07,198 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5560b162[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461):Failed to transfer BP-673609853-172.17.0.2-1732519785461:blk_1073741853_1036 to 127.0.0.1:44733 got
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:07,538 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/7de6f7bb5fc848ddb1553bcd31ee6a87 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/7de6f7bb5fc848ddb1553bcd31ee6a87
2024-11-25T07:30:07,547 INFO [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b9d5b52402b54731b92fe45510c70973/info of b9d5b52402b54731b92fe45510c70973 into 7de6f7bb5fc848ddb1553bcd31ee6a87(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-25T07:30:07,547 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b9d5b52402b54731b92fe45510c70973:
2024-11-25T07:30:07,547 INFO [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973., storeName=b9d5b52402b54731b92fe45510c70973/info, priority=13, startTime=1732519807092; duration=0sec
2024-11-25T07:30:07,547 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-11-25T07:30:07,547 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-25T07:30:07,547 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/7de6f7bb5fc848ddb1553bcd31ee6a87 because midkey is the same as first or last row
2024-11-25T07:30:07,547 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-11-25T07:30:07,547 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-25T07:30:07,547 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/7de6f7bb5fc848ddb1553bcd31ee6a87 because midkey is the same as first or last row
2024-11-25T07:30:07,547 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-11-25T07:30:07,548 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-25T07:30:07,548 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/7de6f7bb5fc848ddb1553bcd31ee6a87 because midkey is the same as first or last row
2024-11-25T07:30:07,548 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T07:30:07,548 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9d5b52402b54731b92fe45510c70973:info
2024-11-25T07:30:07,802 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:08,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44859 {}] regionserver.HRegion(8855): Flush requested on b9d5b52402b54731b92fe45510c70973
2024-11-25T07:30:08,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b9d5b52402b54731b92fe45510c70973 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-25T07:30:08,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/5057b188465148b0ae0a94f301a72701 is 1079, key is tmprow/info:/1732519808078/Put/seqid=0
2024-11-25T07:30:08,087 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35967
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:08,087 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49916 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741869_1052 to mirror 127.0.0.1:35967
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:08,087 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad.
2024-11-25T07:30:08,087 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741869_1052
2024-11-25T07:30:08,087 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49916 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-25T07:30:08,087 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49916 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49916 dst: /127.0.0.1:34615
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:08,087 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]
2024-11-25T07:30:08,089 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:08,089 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad.
2024-11-25T07:30:08,089 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741870_1053
2024-11-25T07:30:08,090 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]
2024-11-25T07:30:08,091 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:08,092 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad.
2024-11-25T07:30:08,092 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741871_1054
2024-11-25T07:30:08,092 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]
2024-11-25T07:30:08,095 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44733
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:08,095 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49928 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741872_1055 to mirror 127.0.0.1:44733
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:08,095 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad.
2024-11-25T07:30:08,095 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741872_1055
2024-11-25T07:30:08,095 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49928 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-25T07:30:08,095 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49928 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49928 dst: /127.0.0.1:34615
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:08,096 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]
2024-11-25T07:30:08,097 WARN [IPC Server handler 0 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-25T07:30:08,097 WARN [IPC Server handler 0 on default port 34285 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-25T07:30:08,097 WARN [IPC Server handler 0 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-25T07:30:08,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741873_1056 (size=6027)
2024-11-25T07:30:08,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/5057b188465148b0ae0a94f301a72701
2024-11-25T07:30:08,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/5057b188465148b0ae0a94f301a72701 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/5057b188465148b0ae0a94f301a72701
2024-11-25T07:30:08,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/5057b188465148b0ae0a94f301a72701, entries=1, sequenceid=45, filesize=5.9 K
2024-11-25T07:30:08,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for b9d5b52402b54731b92fe45510c70973 in 34ms, sequenceid=45, compaction requested=false
2024-11-25T07:30:08,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b9d5b52402b54731b92fe45510c70973:
2024-11-25T07:30:08,114 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K
2024-11-25T07:30:08,114 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-25T07:30:08,114 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/7de6f7bb5fc848ddb1553bcd31ee6a87 because midkey is the same as first or last row
2024-11-25T07:30:08,196 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5560b162[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461):Failed to transfer BP-673609853-172.17.0.2-1732519785461:blk_1073741843_1026 to 127.0.0.1:44733 got
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:08,197 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6f5ef6a1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461):Failed to transfer BP-673609853-172.17.0.2-1732519785461:blk_1073741863_1046 to 127.0.0.1:44733 got
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:08,479 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:08,627 WARN [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]
2024-11-25T07:30:08,627 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:08,627 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C44859%2C1732519786393:(num 1732519806581) roll requested
2024-11-25T07:30:08,628 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C44859%2C1732519786393.1732519808627
2024-11-25T07:30:08,632 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35967
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:08,632 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49944 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741874_1057 to mirror 127.0.0.1:35967
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:08,632 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad.
2024-11-25T07:30:08,632 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741874_1057
2024-11-25T07:30:08,632 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49944 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-25T07:30:08,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49944 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49944 dst: /127.0.0.1:34615
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:08,633 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]
2024-11-25T07:30:08,634 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:08,635 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad.
2024-11-25T07:30:08,635 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741875_1058
2024-11-25T07:30:08,635 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]
2024-11-25T07:30:08,636 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:08,637 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad.
2024-11-25T07:30:08,637 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741876_1059
2024-11-25T07:30:08,637 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]
2024-11-25T07:30:08,638 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:08,639 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad.
2024-11-25T07:30:08,639 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741877_1060
2024-11-25T07:30:08,639 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]
2024-11-25T07:30:08,640 WARN [IPC Server handler 2 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-25T07:30:08,640 WARN [IPC Server handler 2 on default port 34285 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-25T07:30:08,640 WARN [IPC Server handler 2 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-25T07:30:08,642 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T07:30:08,642 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T07:30:08,643 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T07:30:08,643 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T07:30:08,643 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T07:30:08,643 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519806581 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519808627
2024-11-25T07:30:08,644 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40375:40375)]
2024-11-25T07:30:08,644 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 is not closed yet, will try archiving it next time
2024-11-25T07:30:08,644 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519806581 is not closed yet, will try archiving it next time
2024-11-25T07:30:08,644 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519802565 to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs/5eb3d201e8c9%2C44859%2C1732519786393.1732519802565
2024-11-25T07:30:08,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741858_1041 (size=13591)
2024-11-25T07:30:09,046 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 is not closed yet, will try archiving it next time
2024-11-25T07:30:09,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44859 {}] regionserver.HRegion(8855): Flush requested on b9d5b52402b54731b92fe45510c70973
2024-11-25T07:30:09,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b9d5b52402b54731b92fe45510c70973 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-25T07:30:09,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/d5ef80589a3d43d6816678dfd7c82182 is 1079, key is tmprow/info:/1732519809502/Put/seqid=0
2024-11-25T07:30:09,529 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:09,529 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad.
2024-11-25T07:30:09,529 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741879_1062
2024-11-25T07:30:09,530 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]
2024-11-25T07:30:09,537 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32821
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:09,536 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49964 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741880_1063 to mirror 127.0.0.1:32821
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:09,537 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad.
2024-11-25T07:30:09,537 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741880_1063
2024-11-25T07:30:09,537 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49964 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-25T07:30:09,537 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49964 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49964 dst: /127.0.0.1:34615
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:09,538 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]
2024-11-25T07:30:09,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49976 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741881_1064 to mirror 127.0.0.1:43413
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:09,541 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49976 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-25T07:30:09,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49976 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49976 dst: /127.0.0.1:34615
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T07:30:09,544 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43413
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:09,545 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad.
2024-11-25T07:30:09,545 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741881_1064
2024-11-25T07:30:09,545 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]
2024-11-25T07:30:09,548 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:09,549 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad.
2024-11-25T07:30:09,549 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741882_1065
2024-11-25T07:30:09,551 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]
2024-11-25T07:30:09,552 WARN [IPC Server handler 3 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-25T07:30:09,552 WARN [IPC Server handler 3 on default port 34285 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-25T07:30:09,552 WARN [IPC Server handler 3 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-25T07:30:09,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741883_1066 (size=6027)
2024-11-25T07:30:09,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/d5ef80589a3d43d6816678dfd7c82182
2024-11-25T07:30:09,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/d5ef80589a3d43d6816678dfd7c82182 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/d5ef80589a3d43d6816678dfd7c82182
2024-11-25T07:30:09,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/d5ef80589a3d43d6816678dfd7c82182, entries=1, sequenceid=55, filesize=5.9 K
2024-11-25T07:30:09,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for b9d5b52402b54731b92fe45510c70973 in 104ms, sequenceid=55, compaction requested=true
2024-11-25T07:30:09,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b9d5b52402b54731b92fe45510c70973:
2024-11-25T07:30:09,608 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K
2024-11-25T07:30:09,608 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-25T07:30:09,608 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/7de6f7bb5fc848ddb1553bcd31ee6a87 because midkey is the same as first or last row
2024-11-25T07:30:09,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9d5b52402b54731b92fe45510c70973:info, priority=-2147483648, current under compaction store size is 1
2024-11-25T07:30:09,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T07:30:09,608 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-25T07:30:09,612 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-25T07:30:09,612 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HStore(1541): b9d5b52402b54731b92fe45510c70973/info is initiating minor compaction (all files)
2024-11-25T07:30:09,612 INFO [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b9d5b52402b54731b92fe45510c70973/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:30:09,612 INFO [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/7de6f7bb5fc848ddb1553bcd31ee6a87, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/5057b188465148b0ae0a94f301a72701, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/d5ef80589a3d43d6816678dfd7c82182] into tmpdir=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp, totalSize=29.3 K 2024-11-25T07:30:09,613 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7de6f7bb5fc848ddb1553bcd31ee6a87, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732519800574 2024-11-25T07:30:09,613 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5057b188465148b0ae0a94f301a72701, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732519808078 2024-11-25T07:30:09,614 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] compactions.Compactor(225): Compacting d5ef80589a3d43d6816678dfd7c82182, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732519809502 2024-11-25T07:30:09,635 INFO [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9d5b52402b54731b92fe45510c70973#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:30:09,636 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/1ac8c640d29b48c58be3d48103e08be5 is 1080, key is row0002/info:/1732519800574/Put/seqid=0 2024-11-25T07:30:09,639 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32821 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:09,639 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49998 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741884_1067 to mirror 127.0.0.1:32821 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:09,639 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad. 2024-11-25T07:30:09,640 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741884_1067 2024-11-25T07:30:09,640 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49998 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T07:30:09,640 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:49998 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49998 dst: /127.0.0.1:34615 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:09,641 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK] 2024-11-25T07:30:09,642 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:09,642 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK], DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:09,642 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741885_1068 2024-11-25T07:30:09,643 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:09,644 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:09,644 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 2024-11-25T07:30:09,644 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741886_1069 2024-11-25T07:30:09,644 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:09,649 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35967 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:09,649 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:50002 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741887_1070 to mirror 127.0.0.1:35967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:09,649 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]) is bad. 2024-11-25T07:30:09,649 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741887_1070 2024-11-25T07:30:09,649 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:50002 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T07:30:09,649 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:50002 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50002 dst: /127.0.0.1:34615 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:09,655 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35967,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK] 2024-11-25T07:30:09,657 WARN [IPC Server handler 3 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T07:30:09,657 WARN [IPC Server handler 3 on default port 34285 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T07:30:09,657 WARN [IPC Server handler 3 on default port 34285 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T07:30:09,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741888_1071 (size=18097) 2024-11-25T07:30:09,682 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/1ac8c640d29b48c58be3d48103e08be5 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/1ac8c640d29b48c58be3d48103e08be5 2024-11-25T07:30:09,692 INFO [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b9d5b52402b54731b92fe45510c70973/info of b9d5b52402b54731b92fe45510c70973 into 1ac8c640d29b48c58be3d48103e08be5(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T07:30:09,692 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b9d5b52402b54731b92fe45510c70973: 2024-11-25T07:30:09,692 INFO [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973., storeName=b9d5b52402b54731b92fe45510c70973/info, priority=13, startTime=1732519809608; duration=0sec 2024-11-25T07:30:09,693 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-25T07:30:09,693 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:30:09,693 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/1ac8c640d29b48c58be3d48103e08be5 because midkey is the same as first or last row 2024-11-25T07:30:09,693 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-25T07:30:09,693 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:30:09,693 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/1ac8c640d29b48c58be3d48103e08be5 because midkey is the same as first or last row 2024-11-25T07:30:09,693 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-25T07:30:09,693 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:30:09,693 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/1ac8c640d29b48c58be3d48103e08be5 because midkey is the same as first or last row 2024-11-25T07:30:09,693 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:30:09,693 DEBUG [RS:0;5eb3d201e8c9:44859-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9d5b52402b54731b92fe45510c70973:info 2024-11-25T07:30:09,802 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:10,198 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5560b162[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461):Failed to transfer BP-673609853-172.17.0.2-1732519785461:blk_1073741873_1056 to 127.0.0.1:35967 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:10,198 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6f5ef6a1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461):Failed to transfer BP-673609853-172.17.0.2-1732519785461:blk_1073741868_1051 to 127.0.0.1:35967 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:10,479 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:10,644 WARN [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-25T07:30:10,644 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:10,729 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:30:10,735 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:30:10,736 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:30:10,736 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:30:10,737 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:30:10,737 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bf26ed1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:30:10,737 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@287f99dc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:30:10,855 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@50153eb5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/java.io.tmpdir/jetty-localhost-46375-hadoop-hdfs-3_4_1-tests_jar-_-any-16817075872148175716/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:10,855 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17162101{HTTP/1.1, (http/1.1)}{localhost:46375} 2024-11-25T07:30:10,855 INFO [Time-limited test {}] server.Server(415): Started @130702ms 2024-11-25T07:30:10,857 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:30:10,976 WARN [Thread-988 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T07:30:10,985 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd33025a78e201f2 with lease ID 0x4f55d6bdd8ce1112: from storage DS-2e563855-25ac-4d93-b585-1e5288732181 node DatanodeRegistration(127.0.0.1:45801, datanodeUuid=0067738f-ec80-43c7-978e-8f5a2b9b9e66, infoPort=42867, infoSecurePort=0, ipcPort=45287, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T07:30:10,985 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd33025a78e201f2 with lease ID 0x4f55d6bdd8ce1112: from storage DS-c2430039-5d2a-46d7-a7d1-8ea086e9ea37 node DatanodeRegistration(127.0.0.1:45801, datanodeUuid=0067738f-ec80-43c7-978e-8f5a2b9b9e66, infoPort=42867, infoSecurePort=0, ipcPort=45287, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:30:11,198 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6f5ef6a1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461):Failed to transfer BP-673609853-172.17.0.2-1732519785461:blk_1073741858_1041 to 127.0.0.1:44733 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:11,198 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5560b162[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461):Failed to transfer BP-673609853-172.17.0.2-1732519785461:blk_1073741883_1066 to 127.0.0.1:43413 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:11,802 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:12,480 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:12,645 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:13,200 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5560b162[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34615, datanodeUuid=a3e14720-90fd-4867-92b6-5130e1c60561, infoPort=40375, infoSecurePort=0, ipcPort=35519, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461):Failed to transfer BP-673609853-172.17.0.2-1732519785461:blk_1073741888_1071 to 127.0.0.1:44733 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:13,803 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:14,480 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:14,645 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:15,803 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:16,319 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T07:30:16,481 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:16,645 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:16,984 ERROR [FSHLog-0-hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData-prefix:5eb3d201e8c9,41597,1732519786338 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:16,984 WARN [FSHLog-0-hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData-prefix:5eb3d201e8c9,41597,1732519786338 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:16,984 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C41597%2C1732519786338:(num 1732519786903) roll requested 2024-11-25T07:30:16,985 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C41597%2C1732519786338.1732519816985 2024-11-25T07:30:16,988 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:16,988 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 
2024-11-25T07:30:16,988 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741889_1072 2024-11-25T07:30:16,988 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:16,990 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:16,990 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK], DatanodeInfoWithStorage[127.0.0.1:45801,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:16,990 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741890_1073 2024-11-25T07:30:16,990 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:16,994 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:16,994 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:16,995 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:16,995 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:16,995 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:16,995 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338/5eb3d201e8c9%2C41597%2C1732519786338.1732519816985 2024-11-25T07:30:16,995 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:16,995 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:16,995 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 2024-11-25T07:30:16,996 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40375:40375),(127.0.0.1/127.0.0.1:42867:42867)] 2024-11-25T07:30:16,996 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 is not closed yet, will try archiving it next time 2024-11-25T07:30:16,996 WARN [IPC Server handler 4 on default port 34285 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741830_1006 2024-11-25T07:30:16,996 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 after 1ms 2024-11-25T07:30:17,803 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:18,646 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:19,804 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:20,646 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:20,998 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 after 4002ms 2024-11-25T07:30:21,001 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@41258194 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-673609853-172.17.0.2-1732519785461:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:32821,null,null]) java.net.ConnectException: Call From 5eb3d201e8c9/172.17.0.2 to localhost:34923 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-25T07:30:21,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741833_1020 (size=455) 2024-11-25T07:30:21,590 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs/5eb3d201e8c9%2C44859%2C1732519786393.1732519787236 2024-11-25T07:30:21,591 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519806581 to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs/5eb3d201e8c9%2C44859%2C1732519786393.1732519806581 2024-11-25T07:30:21,804 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:21,981 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@70ae0229[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45801, datanodeUuid=0067738f-ec80-43c7-978e-8f5a2b9b9e66, infoPort=42867, infoSecurePort=0, ipcPort=45287, storageInfo=lv=-57;cid=testClusterID;nsid=489730459;c=1732519785461):Failed to transfer BP-673609853-172.17.0.2-1732519785461:blk_1073741833_1020 to 127.0.0.1:32821 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:22,647 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:23,804 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,586 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C44859%2C1732519786393.1732519824586 2024-11-25T07:30:24,590 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1870597311_22 at /127.0.0.1:36330 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741892_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data4]'}, localName='127.0.0.1:45801', datanodeUuid='0067738f-ec80-43c7-978e-8f5a2b9b9e66', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741892_1076 to mirror 127.0.0.1:43413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:24,590 WARN [Thread-1018 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,590 WARN [Thread-1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45801,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:24,590 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1870597311_22 at /127.0.0.1:36330 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741892_1076] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-25T07:30:24,590 WARN [Thread-1018 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741892_1076 2024-11-25T07:30:24,590 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1870597311_22 at /127.0.0.1:36330 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741892_1076] {}] datanode.DataXceiver(331): 127.0.0.1:45801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36330 dst: /127.0.0.1:45801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:24,591 WARN [Thread-1018 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:24,595 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,595 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,595 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,595 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,596 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,596 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519808627 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519824586 2024-11-25T07:30:24,597 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42867:42867),(127.0.0.1/127.0.0.1:40375:40375)] 2024-11-25T07:30:24,597 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519808627 is not closed yet, will try archiving it next time 2024-11-25T07:30:24,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741878_1061 (size=12911) 2024-11-25T07:30:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44859 {}] regionserver.HRegion(8855): Flush requested on b9d5b52402b54731b92fe45510c70973 2024-11-25T07:30:24,601 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b9d5b52402b54731b92fe45510c70973 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-25T07:30:24,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/05a2f38b03724bac915d50358d24b37c is 1080, key is row0013/info:/1732519824598/Put/seqid=0 2024-11-25T07:30:24,607 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:24,608 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 2024-11-25T07:30:24,608 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741894_1078 2024-11-25T07:30:24,608 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:24,611 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1079 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,610 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36346 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741895_1079] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data4]'}, localName='127.0.0.1:45801', datanodeUuid='0067738f-ec80-43c7-978e-8f5a2b9b9e66', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741895_1079 to mirror 127.0.0.1:43413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:24,611 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741895_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45801,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:24,611 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741895_1079 2024-11-25T07:30:24,611 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36346 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741895_1079] {}] datanode.BlockReceiver(316): Block 1073741895 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T07:30:24,611 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36346 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741895_1079] {}] datanode.DataXceiver(331): 127.0.0.1:45801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36346 dst: /127.0.0.1:45801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:24,611 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:24,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741896_1080 (size=8190) 2024-11-25T07:30:24,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741896_1080 (size=8190) 2024-11-25T07:30:24,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/05a2f38b03724bac915d50358d24b37c 2024-11-25T07:30:24,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/05a2f38b03724bac915d50358d24b37c as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/05a2f38b03724bac915d50358d24b37c 2024-11-25T07:30:24,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/05a2f38b03724bac915d50358d24b37c, entries=3, sequenceid=66, filesize=8.0 K 2024-11-25T07:30:24,634 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for b9d5b52402b54731b92fe45510c70973 in 33ms, sequenceid=66, compaction requested=false 2024-11-25T07:30:24,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b9d5b52402b54731b92fe45510c70973: 2024-11-25T07:30:24,635 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-25T07:30:24,635 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:30:24,635 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/1ac8c640d29b48c58be3d48103e08be5 because midkey is the same as first or last row 2024-11-25T07:30:24,647 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-25T07:30:24,647 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T07:30:24,819 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T07:30:24,819 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:30:24,819 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:24,819 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:24,819 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T07:30:24,819 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T07:30:24,819 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=575691630, stopped=false 2024-11-25T07:30:24,820 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5eb3d201e8c9,41597,1732519786338 2024-11-25T07:30:24,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:30:24,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44807-0x1014e07e1e10002, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:30:24,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:30:24,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:24,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44807-0x1014e07e1e10002, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:24,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:24,822 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:30:24,822 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T07:30:24,822 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:30:24,822 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:24,822 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '5eb3d201e8c9,44859,1732519786393' ***** 2024-11-25T07:30:24,822 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T07:30:24,822 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5eb3d201e8c9,44807,1732519787746' ***** 2024-11-25T07:30:24,822 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T07:30:24,822 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T07:30:24,822 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T07:30:24,822 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:30:24,823 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:30:24,823 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T07:30:24,823 INFO [RS:1;5eb3d201e8c9:44807 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T07:30:24,823 INFO [RS:0;5eb3d201e8c9:44859 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T07:30:24,823 INFO [RS:0;5eb3d201e8c9:44859 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T07:30:24,823 INFO [RS:1;5eb3d201e8c9:44807 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T07:30:24,823 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer(959): stopping server 5eb3d201e8c9,44807,1732519787746 2024-11-25T07:30:24,823 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:30:24,823 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(3091): Received CLOSE for b9d5b52402b54731b92fe45510c70973 2024-11-25T07:30:24,823 INFO [RS:1;5eb3d201e8c9:44807 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;5eb3d201e8c9:44807. 
2024-11-25T07:30:24,823 DEBUG [RS:1;5eb3d201e8c9:44807 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:30:24,823 DEBUG [RS:1;5eb3d201e8c9:44807 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:24,823 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer(976): stopping server 5eb3d201e8c9,44807,1732519787746; all regions closed. 2024-11-25T07:30:24,823 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(959): stopping server 5eb3d201e8c9,44859,1732519786393 2024-11-25T07:30:24,823 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:30:24,823 INFO [RS:0;5eb3d201e8c9:44859 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5eb3d201e8c9:44859. 
2024-11-25T07:30:24,823 DEBUG [RS:0;5eb3d201e8c9:44859 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:30:24,823 DEBUG [RS:0;5eb3d201e8c9:44859 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:24,823 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T07:30:24,823 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T07:30:24,824 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T07:30:24,824 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T07:30:24,824 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44807-0x1014e07e1e10002, quorum=127.0.0.1:56970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:30:24,824 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T07:30:24,824 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T07:30:24,824 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b9d5b52402b54731b92fe45510c70973, disabling compactions & flushes 2024-11-25T07:30:24,824 DEBUG [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(1325): Online Regions={b9d5b52402b54731b92fe45510c70973=TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973., 1588230740=hbase:meta,,1.1588230740} 2024-11-25T07:30:24,824 DEBUG [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b9d5b52402b54731b92fe45510c70973 2024-11-25T07:30:24,824 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 
2024-11-25T07:30:24,824 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,824 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:30:24,824 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,824 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. after waiting 0 ms 2024-11-25T07:30:24,824 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:30:24,824 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:30:24,824 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,824 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:30:24,824 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:30:24,824 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,825 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:30:24,825 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing b9d5b52402b54731b92fe45510c70973 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-25T07:30:24,825 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:30:24,825 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,825 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-25T07:30:24,825 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:24,825 ERROR [FSHLog-0-hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a-prefix:5eb3d201e8c9,44859,1732519786393.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,825 WARN [FSHLog-0-hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a-prefix:5eb3d201e8c9,44859,1732519786393.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,825 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:24,825 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 2024-11-25T07:30:24,825 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C44859%2C1732519786393.meta:.meta(num 1732519787635) roll requested 2024-11-25T07:30:24,826 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519824825.meta 2024-11-25T07:30:24,826 WARN [IPC Server handler 1 on default port 34285 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 has not been closed. Lease recovery is in progress. RecoveryId = 1081 for block blk_1073741837_1013 2024-11-25T07:30:24,826 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 after 1ms 2024-11-25T07:30:24,828 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,828 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 
2024-11-25T07:30:24,828 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741897_1082 2024-11-25T07:30:24,829 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:24,830 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/bc715b83d4a74322bd6adbba51afae89 is 1080, key is row0015/info:/1732519824602/Put/seqid=0 2024-11-25T07:30:24,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36376 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741898_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data4]'}, localName='127.0.0.1:45801', datanodeUuid='0067738f-ec80-43c7-978e-8f5a2b9b9e66', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741898_1083 to mirror 127.0.0.1:43413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:24,831 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:24,831 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36376 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741898_1083] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-25T07:30:24,832 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741898_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45801,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:24,832 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741898_1083 2024-11-25T07:30:24,832 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36376 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741898_1083] {}] datanode.DataXceiver(331): 127.0.0.1:45801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36376 dst: /127.0.0.1:45801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:24,832 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:24,832 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:24,832 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741899_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad. 2024-11-25T07:30:24,832 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741899_1084 2024-11-25T07:30:24,833 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK] 2024-11-25T07:30:24,833 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,833 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741900_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad. 2024-11-25T07:30:24,833 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741900_1085 2024-11-25T07:30:24,834 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK] 2024-11-25T07:30:24,835 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:24,835 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:45828 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741901_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741901_1086 to mirror 127.0.0.1:43413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:24,835 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741901_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:24,835 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:45828 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741901_1086] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T07:30:24,835 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741901_1086 2024-11-25T07:30:24,835 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:45828 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741901_1086] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45828 dst: /127.0.0.1:34615 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:24,835 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:24,837 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,837 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741903_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 
2024-11-25T07:30:24,837 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741903_1088 2024-11-25T07:30:24,837 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:24,847 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,848 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,848 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,848 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,848 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:24,848 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519824825.meta 2024-11-25T07:30:24,849 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,849 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,849 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta 2024-11-25T07:30:24,850 WARN [IPC Server handler 1 on default port 34285 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta has not been closed. 
Lease recovery is in progress. RecoveryId = 1090 for block blk_1073741834_1010 2024-11-25T07:30:24,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741904_1089 (size=14660) 2024-11-25T07:30:24,850 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta after 1ms 2024-11-25T07:30:24,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741904_1089 (size=14660) 2024-11-25T07:30:24,851 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/bc715b83d4a74322bd6adbba51afae89 2024-11-25T07:30:24,857 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42867:42867),(127.0.0.1/127.0.0.1:40375:40375)] 2024-11-25T07:30:24,857 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta is not closed yet, will try archiving it next time 2024-11-25T07:30:24,858 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/.tmp/info/bc715b83d4a74322bd6adbba51afae89 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/bc715b83d4a74322bd6adbba51afae89 2024-11-25T07:30:24,864 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/bc715b83d4a74322bd6adbba51afae89, entries=9, sequenceid=78, filesize=14.3 K 2024-11-25T07:30:24,866 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for b9d5b52402b54731b92fe45510c70973 in 42ms, sequenceid=78, compaction requested=true 2024-11-25T07:30:24,866 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/a1703d8d12df468885273d28547e48f7, 
hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/fee695575c9a468392b18fb4d57a430e, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/7de6f7bb5fc848ddb1553bcd31ee6a87, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/70e69983da0a4d898dee8e3ca799206d, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/5057b188465148b0ae0a94f301a72701, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/d5ef80589a3d43d6816678dfd7c82182] to archive 2024-11-25T07:30:24,867 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T07:30:24,870 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/a1703d8d12df468885273d28547e48f7 to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/a1703d8d12df468885273d28547e48f7 2024-11-25T07:30:24,871 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/fee695575c9a468392b18fb4d57a430e to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/fee695575c9a468392b18fb4d57a430e 2024-11-25T07:30:24,873 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/7de6f7bb5fc848ddb1553bcd31ee6a87 to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/7de6f7bb5fc848ddb1553bcd31ee6a87 2024-11-25T07:30:24,875 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/70e69983da0a4d898dee8e3ca799206d to 
hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/70e69983da0a4d898dee8e3ca799206d 2024-11-25T07:30:24,876 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/5057b188465148b0ae0a94f301a72701 to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/5057b188465148b0ae0a94f301a72701 2024-11-25T07:30:24,876 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/.tmp/info/e8981991b1994743aee8cce51b3f1e07 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973./info:regioninfo/1732519788215/Put/seqid=0 2024-11-25T07:30:24,877 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/d5ef80589a3d43d6816678dfd7c82182 to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/info/d5ef80589a3d43d6816678dfd7c82182 2024-11-25T07:30:24,878 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5eb3d201e8c9:41597 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-25T07:30:24,878 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,878 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a1703d8d12df468885273d28547e48f7=10347, fee695575c9a468392b18fb4d57a430e=12506, 7de6f7bb5fc848ddb1553bcd31ee6a87=17994, 70e69983da0a4d898dee8e3ca799206d=6027, 5057b188465148b0ae0a94f301a72701=6027, d5ef80589a3d43d6816678dfd7c82182=6027] 2024-11-25T07:30:24,879 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741905_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:24,879 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741905_1091 2024-11-25T07:30:24,879 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:24,882 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1092 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44733 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:24,882 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36396 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741906_1092] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data4]'}, localName='127.0.0.1:45801', datanodeUuid='0067738f-ec80-43c7-978e-8f5a2b9b9e66', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741906_1092 to mirror 127.0.0.1:44733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:24,882 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741906_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45801,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 2024-11-25T07:30:24,882 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741906_1092 2024-11-25T07:30:24,882 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36396 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741906_1092] {}] datanode.BlockReceiver(316): Block 1073741906 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T07:30:24,882 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36396 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741906_1092] {}] datanode.DataXceiver(331): 127.0.0.1:45801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36396 dst: /127.0.0.1:45801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:24,883 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:24,883 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b9d5b52402b54731b92fe45510c70973/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-25T07:30:24,884 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 2024-11-25T07:30:24,884 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b9d5b52402b54731b92fe45510c70973: Waiting for close lock at 1732519824823Running coprocessor pre-close hooks at 1732519824823Disabling compacts and flushes for region at 1732519824823Disabling writes for close at 1732519824824 (+1 ms)Obtaining lock to block concurrent updates at 1732519824825 (+1 ms)Preparing flush snapshotting stores in b9d5b52402b54731b92fe45510c70973 at 1732519824825Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732519824825Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. at 1732519824826 (+1 ms)Flushing b9d5b52402b54731b92fe45510c70973/info: creating writer at 1732519824827 (+1 ms)Flushing b9d5b52402b54731b92fe45510c70973/info: appending metadata at 1732519824830 (+3 ms)Flushing b9d5b52402b54731b92fe45510c70973/info: closing flushed file at 1732519824830Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@360021b3: reopening flushed file at 1732519824857 (+27 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for b9d5b52402b54731b92fe45510c70973 in 42ms, sequenceid=78, compaction requested=true at 1732519824866 (+9 ms)Writing region close event to WAL at 1732519824879 (+13 ms)Running coprocessor post-close hooks at 1732519824884 (+5 ms)Closed at 1732519824884 2024-11-25T07:30:24,884 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732519787846.b9d5b52402b54731b92fe45510c70973. 
2024-11-25T07:30:24,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741907_1093 (size=7089) 2024-11-25T07:30:24,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741907_1093 (size=7089) 2024-11-25T07:30:24,890 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/.tmp/info/e8981991b1994743aee8cce51b3f1e07 2024-11-25T07:30:24,891 INFO [regionserver/5eb3d201e8c9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T07:30:24,891 INFO [regionserver/5eb3d201e8c9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T07:30:24,912 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/.tmp/ns/df522c77bb1a485886f7095a849c5046 is 43, key is default/ns:d/1732519787679/Put/seqid=0 2024-11-25T07:30:24,914 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741908_1094 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44733 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,914 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:45860 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741908_1094] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6]'}, localName='127.0.0.1:34615', datanodeUuid='a3e14720-90fd-4867-92b6-5130e1c60561', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741908_1094 to mirror 127.0.0.1:44733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:24,915 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741908_1094 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK], DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 2024-11-25T07:30:24,915 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:45860 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741908_1094] {}] datanode.BlockReceiver(316): Block 1073741908 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T07:30:24,915 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741908_1094 2024-11-25T07:30:24,915 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:45860 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741908_1094] {}] datanode.DataXceiver(331): 127.0.0.1:34615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45860 dst: /127.0.0.1:34615 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:24,915 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:24,916 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741909_1095 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,917 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741909_1095 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK], DatanodeInfoWithStorage[127.0.0.1:34615,DS-ffc0030a-f01a-420e-9fa7-90954efaf9b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK]) is bad. 2024-11-25T07:30:24,917 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741909_1095 2024-11-25T07:30:24,917 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43413,DS-7e215f82-20f1-4a8c-8186-1871d004bbf5,DISK] 2024-11-25T07:30:24,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741910_1096 (size=5153) 2024-11-25T07:30:24,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741910_1096 (size=5153) 2024-11-25T07:30:24,923 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/.tmp/ns/df522c77bb1a485886f7095a849c5046 2024-11-25T07:30:24,945 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/.tmp/table/a84dd990c2924420b967ac757b6dd7fb is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732519788227/Put/seqid=0 2024-11-25T07:30:24,947 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741911_1097 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:24,947 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741911_1097 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK], DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK]) is bad. 2024-11-25T07:30:24,948 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741911_1097 2024-11-25T07:30:24,948 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32821,DS-23d58dea-54b0-41d9-a032-7e85b064f6c2,DISK] 2024-11-25T07:30:24,950 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741912_1098 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44733 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:24,951 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-673609853-172.17.0.2-1732519785461:blk_1073741912_1098 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45801,DS-2e563855-25ac-4d93-b585-1e5288732181,DISK], DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK]) is bad. 2024-11-25T07:30:24,950 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36424 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741912_1098] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data4]'}, localName='127.0.0.1:45801', datanodeUuid='0067738f-ec80-43c7-978e-8f5a2b9b9e66', xmitsInProgress=0}:Exception transferring block BP-673609853-172.17.0.2-1732519785461:blk_1073741912_1098 to mirror 127.0.0.1:44733 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:24,951 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-673609853-172.17.0.2-1732519785461:blk_1073741912_1098 2024-11-25T07:30:24,951 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36424 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741912_1098] {}] datanode.BlockReceiver(316): Block 1073741912 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T07:30:24,951 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_962618218_22 at /127.0.0.1:36424 [Receiving block BP-673609853-172.17.0.2-1732519785461:blk_1073741912_1098] {}] datanode.DataXceiver(331): 127.0.0.1:45801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36424 dst: /127.0.0.1:45801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:24,951 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44733,DS-8bdf00cf-1033-447a-adca-1e451258426d,DISK] 2024-11-25T07:30:24,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741913_1099 (size=5424) 2024-11-25T07:30:24,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741913_1099 (size=5424) 2024-11-25T07:30:24,957 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/.tmp/table/a84dd990c2924420b967ac757b6dd7fb 2024-11-25T07:30:24,963 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/.tmp/info/e8981991b1994743aee8cce51b3f1e07 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/info/e8981991b1994743aee8cce51b3f1e07 2024-11-25T07:30:24,969 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/info/e8981991b1994743aee8cce51b3f1e07, entries=10, sequenceid=11, filesize=6.9 K 2024-11-25T07:30:24,970 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/.tmp/ns/df522c77bb1a485886f7095a849c5046 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/ns/df522c77bb1a485886f7095a849c5046 2024-11-25T07:30:24,975 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/ns/df522c77bb1a485886f7095a849c5046, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T07:30:24,976 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/.tmp/table/a84dd990c2924420b967ac757b6dd7fb as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/table/a84dd990c2924420b967ac757b6dd7fb 2024-11-25T07:30:24,981 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/table/a84dd990c2924420b967ac757b6dd7fb, entries=2, sequenceid=11, filesize=5.3 K 2024-11-25T07:30:24,983 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, 
compaction requested=false 2024-11-25T07:30:24,988 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T07:30:24,989 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:30:24,989 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:30:24,989 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519824824Running coprocessor pre-close hooks at 1732519824824Disabling compacts and flushes for region at 1732519824824Disabling writes for close at 1732519824825 (+1 ms)Obtaining lock to block concurrent updates at 1732519824825Preparing flush snapshotting stores in 1588230740 at 1732519824825Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732519824825Flushing stores of hbase:meta,,1.1588230740 at 1732519824857 (+32 ms)Flushing 1588230740/info: creating writer at 1732519824857Flushing 1588230740/info: appending metadata at 1732519824876 (+19 ms)Flushing 1588230740/info: closing flushed file at 1732519824876Flushing 1588230740/ns: creating writer at 1732519824896 (+20 ms)Flushing 1588230740/ns: appending metadata at 1732519824911 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732519824911Flushing 1588230740/table: creating writer at 1732519824929 (+18 ms)Flushing 1588230740/table: appending metadata at 1732519824945 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732519824945Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c935415: reopening flushed file at 1732519824962 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ba552c7: reopening flushed file at 1732519824969 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12e46d05: reopening flushed file at 1732519824975 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, compaction requested=false at 1732519824983 (+8 ms)Writing region close event to WAL at 1732519824985 (+2 ms)Running coprocessor post-close hooks at 1732519824989 (+4 ms)Closed at 1732519824989 2024-11-25T07:30:24,989 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T07:30:24,998 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.1732519808627 to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs/5eb3d201e8c9%2C44859%2C1732519786393.1732519808627 2024-11-25T07:30:25,024 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(976): stopping server 5eb3d201e8c9,44859,1732519786393; all regions closed. 
2024-11-25T07:30:25,025 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:25,025 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:25,025 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:25,025 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:25,025 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:25,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741902_1087 (size=825) 2024-11-25T07:30:25,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741902_1087 (size=825) 2024-11-25T07:30:25,101 INFO [regionserver/5eb3d201e8c9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T07:30:25,101 INFO [regionserver/5eb3d201e8c9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T07:30:25,102 INFO [regionserver/5eb3d201e8c9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:30:25,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741878_1061 (size=12911) 2024-11-25T07:30:25,761 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T07:30:25,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:30:25,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T07:30:25,801 INFO [regionserver/5eb3d201e8c9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:30:27,694 INFO [master/5eb3d201e8c9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T07:30:27,694 INFO [master/5eb3d201e8c9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-25T07:30:27,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741836_1012 (size=76) 2024-11-25T07:30:27,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:30:28,827 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 after 4002ms 2024-11-25T07:30:28,851 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta after 4002ms 2024-11-25T07:30:28,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:30:28,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:30:29,826 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-25T07:30:29,828 DEBUG [RS:1;5eb3d201e8c9:44807 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs 2024-11-25T07:30:29,828 INFO [RS:1;5eb3d201e8c9:44807 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C44807%2C1732519787746:(num 1732519787946) 2024-11-25T07:30:29,828 DEBUG [RS:1;5eb3d201e8c9:44807 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:29,828 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:30:29,828 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:30:29,828 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.ChoreService(370): Chore service for: regionserver/5eb3d201e8c9:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T07:30:29,828 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T07:30:29,828 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T07:30:29,828 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T07:30:29,828 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T07:30:29,828 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:30:29,828 INFO [RS:1;5eb3d201e8c9:44807 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44807 2024-11-25T07:30:29,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:30:29,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44807-0x1014e07e1e10002, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5eb3d201e8c9,44807,1732519787746 2024-11-25T07:30:29,831 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:30:29,832 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5eb3d201e8c9,44807,1732519787746] 2024-11-25T07:30:29,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:29,834 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5eb3d201e8c9,44807,1732519787746 already deleted, retry=false 2024-11-25T07:30:29,834 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5eb3d201e8c9,44807,1732519787746 expired; onlineServers=1 2024-11-25T07:30:29,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:29,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:29,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:29,900 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:29,900 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:29,900 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:29,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:29,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:29,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44807-0x1014e07e1e10002, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:30:29,932 INFO [RS:1;5eb3d201e8c9:44807 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:30:29,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44807-0x1014e07e1e10002, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:30:29,932 INFO [RS:1;5eb3d201e8c9:44807 {}] regionserver.HRegionServer(1031): Exiting; stopping=5eb3d201e8c9,44807,1732519787746; zookeeper connection closed. 2024-11-25T07:30:29,932 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@49ac6b3b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@49ac6b3b 2024-11-25T07:30:30,026 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-25T07:30:30,029 DEBUG [RS:0;5eb3d201e8c9:44859 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs 2024-11-25T07:30:30,029 INFO [RS:0;5eb3d201e8c9:44859 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C44859%2C1732519786393.meta:.meta(num 1732519824825) 2024-11-25T07:30:30,029 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:30,029 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:30,029 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:30,029 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:30,030 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:30,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741893_1077 (size=14682) 2024-11-25T07:30:30,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741893_1077 (size=14682) 2024-11-25T07:30:30,034 DEBUG [RS:0;5eb3d201e8c9:44859 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs 2024-11-25T07:30:30,034 INFO [RS:0;5eb3d201e8c9:44859 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C44859%2C1732519786393:(num 1732519824586) 2024-11-25T07:30:30,034 DEBUG [RS:0;5eb3d201e8c9:44859 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:30,034 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:30:30,034 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:30:30,034 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.ChoreService(370): Chore service for: regionserver/5eb3d201e8c9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-25T07:30:30,034 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:30:30,034 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T07:30:30,034 INFO [RS:0;5eb3d201e8c9:44859 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44859 2024-11-25T07:30:30,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5eb3d201e8c9,44859,1732519786393 2024-11-25T07:30:30,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:30:30,036 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:30:30,038 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5eb3d201e8c9,44859,1732519786393] 2024-11-25T07:30:30,039 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5eb3d201e8c9,44859,1732519786393 already deleted, retry=false 2024-11-25T07:30:30,039 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5eb3d201e8c9,44859,1732519786393 expired; onlineServers=0 2024-11-25T07:30:30,039 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5eb3d201e8c9,41597,1732519786338' ***** 2024-11-25T07:30:30,039 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T07:30:30,039 INFO [M:0;5eb3d201e8c9:41597 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:30:30,039 INFO [M:0;5eb3d201e8c9:41597 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:30:30,039 DEBUG [M:0;5eb3d201e8c9:41597 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T07:30:30,039 DEBUG [M:0;5eb3d201e8c9:41597 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T07:30:30,039 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T07:30:30,039 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519786985 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519786985,5,FailOnTimeoutGroup] 2024-11-25T07:30:30,039 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519786985 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519786985,5,FailOnTimeoutGroup] 2024-11-25T07:30:30,039 INFO [M:0;5eb3d201e8c9:41597 {}] hbase.ChoreService(370): Chore service for: master/5eb3d201e8c9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T07:30:30,039 INFO [M:0;5eb3d201e8c9:41597 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:30:30,040 DEBUG [M:0;5eb3d201e8c9:41597 {}] master.HMaster(1795): Stopping service threads 2024-11-25T07:30:30,040 INFO [M:0;5eb3d201e8c9:41597 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T07:30:30,040 INFO [M:0;5eb3d201e8c9:41597 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:30:30,040 INFO [M:0;5eb3d201e8c9:41597 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T07:30:30,040 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T07:30:30,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T07:30:30,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:30,043 DEBUG [M:0;5eb3d201e8c9:41597 {}] zookeeper.ZKUtil(347): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T07:30:30,043 WARN [M:0;5eb3d201e8c9:41597 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T07:30:30,044 INFO [M:0;5eb3d201e8c9:41597 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/.lastflushedseqids 2024-11-25T07:30:30,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741914_1100 (size=130) 2024-11-25T07:30:30,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741914_1100 (size=130) 2024-11-25T07:30:30,050 INFO [M:0;5eb3d201e8c9:41597 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T07:30:30,050 INFO [M:0;5eb3d201e8c9:41597 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T07:30:30,050 DEBUG [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:30:30,050 INFO [M:0;5eb3d201e8c9:41597 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:30:30,050 DEBUG [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:30:30,050 DEBUG [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:30:30,050 DEBUG [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:30:30,050 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-25T07:30:30,067 DEBUG [M:0;5eb3d201e8c9:41597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea2a413e73fa462690e0039773c14b02 is 82, key is hbase:meta,,1/info:regioninfo/1732519787661/Put/seqid=0 2024-11-25T07:30:30,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741915_1101 (size=5672) 2024-11-25T07:30:30,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741915_1101 (size=5672) 2024-11-25T07:30:30,072 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea2a413e73fa462690e0039773c14b02 2024-11-25T07:30:30,092 DEBUG [M:0;5eb3d201e8c9:41597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/253363d7141242eda69255cf1b96a234 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732519788234/Put/seqid=0 2024-11-25T07:30:30,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741916_1102 (size=6255) 2024-11-25T07:30:30,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741916_1102 (size=6255) 2024-11-25T07:30:30,097 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/253363d7141242eda69255cf1b96a234 2024-11-25T07:30:30,102 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 253363d7141242eda69255cf1b96a234 2024-11-25T07:30:30,117 DEBUG [M:0;5eb3d201e8c9:41597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b5d561def5a42f3a834c48c4a6e38e4 is 69, key is 5eb3d201e8c9,44807,1732519787746/rs:state/1732519787782/Put/seqid=0 2024-11-25T07:30:30,121 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741917_1103 (size=5224) 2024-11-25T07:30:30,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741917_1103 (size=5224) 2024-11-25T07:30:30,122 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b5d561def5a42f3a834c48c4a6e38e4 2024-11-25T07:30:30,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:30:30,138 INFO [RS:0;5eb3d201e8c9:44859 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:30:30,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44859-0x1014e07e1e10001, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:30:30,138 INFO [RS:0;5eb3d201e8c9:44859 {}] regionserver.HRegionServer(1031): Exiting; stopping=5eb3d201e8c9,44859,1732519786393; zookeeper connection closed. 2024-11-25T07:30:30,138 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3c83cbb6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3c83cbb6 2024-11-25T07:30:30,138 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-25T07:30:30,141 DEBUG [M:0;5eb3d201e8c9:41597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1a94a8757ae74121b599ffc6bd122318 is 52, key is load_balancer_on/state:d/1732519787726/Put/seqid=0 2024-11-25T07:30:30,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741918_1104 (size=5056) 2024-11-25T07:30:30,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741918_1104 (size=5056) 2024-11-25T07:30:30,146 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1a94a8757ae74121b599ffc6bd122318 2024-11-25T07:30:30,152 DEBUG [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea2a413e73fa462690e0039773c14b02 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ea2a413e73fa462690e0039773c14b02 2024-11-25T07:30:30,156 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ea2a413e73fa462690e0039773c14b02, entries=8, sequenceid=60, filesize=5.5 K 2024-11-25T07:30:30,157 DEBUG [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/253363d7141242eda69255cf1b96a234 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/253363d7141242eda69255cf1b96a234 2024-11-25T07:30:30,162 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 253363d7141242eda69255cf1b96a234 2024-11-25T07:30:30,162 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/253363d7141242eda69255cf1b96a234, entries=6, sequenceid=60, filesize=6.1 K 2024-11-25T07:30:30,163 DEBUG [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b5d561def5a42f3a834c48c4a6e38e4 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7b5d561def5a42f3a834c48c4a6e38e4 2024-11-25T07:30:30,168 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7b5d561def5a42f3a834c48c4a6e38e4, entries=2, sequenceid=60, filesize=5.1 K 2024-11-25T07:30:30,169 DEBUG [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1a94a8757ae74121b599ffc6bd122318 as hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1a94a8757ae74121b599ffc6bd122318 2024-11-25T07:30:30,173 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1a94a8757ae74121b599ffc6bd122318, entries=1, sequenceid=60, filesize=4.9 K 2024-11-25T07:30:30,174 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=60, compaction requested=false 2024-11-25T07:30:30,176 INFO [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T07:30:30,176 DEBUG [M:0;5eb3d201e8c9:41597 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519830050Disabling compacts and flushes for region at 1732519830050Disabling writes for close at 1732519830050Obtaining lock to block concurrent updates at 1732519830050Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732519830050Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732519830051 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732519830051Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732519830051Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732519830067 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732519830067Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732519830077 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732519830091 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732519830091Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732519830102 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732519830116 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732519830116Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732519830127 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732519830140 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732519830140Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68f109f6: reopening flushed file at 1732519830151 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33768626: reopening flushed file at 1732519830157 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c7ebb45: reopening flushed file at 1732519830162 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14a8e1f3: reopening flushed file at 1732519830168 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=60, compaction requested=false at 1732519830174 (+6 ms)Writing region close event to WAL at 1732519830176 (+2 ms)Closed at 1732519830176 2024-11-25T07:30:30,177 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:30,177 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:30,177 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:30,177 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:30,177 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:30,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45801 is added to blk_1073741891_1074 (size=1045) 2024-11-25T07:30:30,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741891_1074 (size=1045) 2024-11-25T07:30:30,412 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T07:30:30,430 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:30,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:30,431 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:30,431 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:30,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:30,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:30,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:30,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:30,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:30,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:30:30,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:30:30,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741835_1011 (size=393) 2024-11-25T07:30:31,004 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@34afb2e0 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-673609853-172.17.0.2-1732519785461:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:32821,null,null]) java.net.ConnectException: Call From 5eb3d201e8c9/172.17.0.2 to localhost:34923 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-25T07:30:31,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:31,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:31,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:30:31,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34615 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:30:32,004 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/WALs/5eb3d201e8c9,41597,1732519786338/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/oldWALs/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 2024-11-25T07:30:32,007 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/MasterData/oldWALs/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903 to hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/oldWALs/5eb3d201e8c9%2C41597%2C1732519786338.1732519786903$masterlocalwal$ 2024-11-25T07:30:32,007 INFO [M:0;5eb3d201e8c9:41597 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T07:30:32,007 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T07:30:32,008 INFO [M:0;5eb3d201e8c9:41597 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41597 2024-11-25T07:30:32,008 INFO [M:0;5eb3d201e8c9:41597 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:30:32,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:30:32,110 INFO [M:0;5eb3d201e8c9:41597 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:30:32,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1014e07e1e10000, quorum=127.0.0.1:56970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:30:32,112 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@50153eb5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:32,112 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17162101{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:30:32,112 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:30:32,113 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@287f99dc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:30:32,113 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bf26ed1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,STOPPED} 2024-11-25T07:30:32,114 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:30:32,114 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T07:30:32,114 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-673609853-172.17.0.2-1732519785461 (Datanode Uuid 0067738f-ec80-43c7-978e-8f5a2b9b9e66) service to localhost/127.0.0.1:34285 2024-11-25T07:30:32,114 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:30:32,114 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4cce6ff7 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-673609853-172.17.0.2-1732519785461:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:32821,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:34923 , LocalHost:localPort 5eb3d201e8c9/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-25T07:30:32,115 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4cce6ff7 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-673609853-172.17.0.2-1732519785461:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45801,null,null], DatanodeInfoWithStorage[127.0.0.1:32821,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-673609853-172.17.0.2-1732519785461 2024-11-25T07:30:32,115 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data3/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:32,115 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4cce6ff7 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-673609853-172.17.0.2-1732519785461:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45801,null,null]) java.io.IOException: No block pool offer service for bpid=BP-673609853-172.17.0.2-1732519785461 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:32,115 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4cce6ff7 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-673609853-172.17.0.2-1732519785461:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:32821,null,null]) java.io.IOException: No block pool offer service for bpid=BP-673609853-172.17.0.2-1732519785461 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:32,115 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4cce6ff7 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-673609853-172.17.0.2-1732519785461:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45801,null,null], DatanodeInfoWithStorage[127.0.0.1:32821,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-673609853-172.17.0.2-1732519785461:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:45801,null,null], DatanodeInfoWithStorage[127.0.0.1:32821,null,null]] 2024-11-25T07:30:32,115 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data4/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:32,115 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:30:32,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74c44b7d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:32,120 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@167a7fde{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:30:32,120 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:30:32,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d7890e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:30:32,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@627eec4d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,STOPPED} 2024-11-25T07:30:32,122 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:30:32,122 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:30:32,122 WARN [BP-673609853-172.17.0.2-1732519785461 heartbeating to localhost/127.0.0.1:34285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-673609853-172.17.0.2-1732519785461 (Datanode Uuid a3e14720-90fd-4867-92b6-5130e1c60561) service to localhost/127.0.0.1:34285 2024-11-25T07:30:32,122 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:30:32,123 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data5/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:32,123 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/cluster_ee7bcebb-d4f5-81b9-453b-a74856f00c0a/data/data6/current/BP-673609853-172.17.0.2-1732519785461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:32,123 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:30:32,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c00ef51{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:30:32,129 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73b9709e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:30:32,129 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:30:32,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42b52d44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:30:32,130 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@142d24a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir/,STOPPED} 2024-11-25T07:30:32,138 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T07:30:32,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T07:30:32,179 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 77) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34285 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34285 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34285 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34285 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:40849 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:34285 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f56e8bf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34285 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:34285 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins@localhost:40849 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f56e8bf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34285 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34285 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34285 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34285 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=446 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=199 (was 141) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8177 (was 8181) 2024-11-25T07:30:32,186 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=446, MaxFileDescriptor=1048576, SystemLoadAverage=199, ProcessCount=11, AvailableMemoryMB=8177 2024-11-25T07:30:32,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T07:30:32,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.log.dir so I do NOT create it in target/test-data/68338220-8420-5dff-ceaa-f6d618529025 2024-11-25T07:30:32,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e5d49d25-3ca3-c016-6e99-e560320256ff/hadoop.tmp.dir so I do NOT create it in target/test-data/68338220-8420-5dff-ceaa-f6d618529025 2024-11-25T07:30:32,187 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a, deleteOnExit=true 2024-11-25T07:30:32,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T07:30:32,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/test.cache.data in system properties and HBase conf 2024-11-25T07:30:32,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.tmp.dir in system properties and HBase conf 
2024-11-25T07:30:32,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir in system properties and HBase conf 2024-11-25T07:30:32,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T07:30:32,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T07:30:32,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T07:30:32,187 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-25T07:30:32,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:30:32,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:30:32,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T07:30:32,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:30:32,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T07:30:32,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T07:30:32,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 
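The repeated "Setting <key> to <dir> in system properties and HBase conf" entries record each path being set twice: once as a JVM system property and once in the Hadoop Configuration that the test utility hands to HBase. A hedged sketch of that pattern with a made-up directory (the real runs above use target/test-data/<uuid>/ under the Jenkins workspace):

    import org.apache.hadoop.conf.Configuration;

    public class TestDirConfigSketch {
      public static void main(String[] args) {
        // Hypothetical test-data directory, standing in for the per-run
        // target/test-data/<uuid>/hadoop.tmp.dir paths in the log.
        String dir = "/tmp/test-data/example/hadoop.tmp.dir";

        // "in system properties and HBase conf": set the JVM system property
        // and the Hadoop/HBase Configuration key to the same location.
        System.setProperty("hadoop.tmp.dir", dir);
        Configuration conf = new Configuration();
        conf.set("hadoop.tmp.dir", dir);

        System.out.println("hadoop.tmp.dir = " + conf.get("hadoop.tmp.dir"));
      }
    }
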
2024-11-25T07:30:32,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:30:32,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T07:30:32,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/nfs.dump.dir in system properties and HBase conf 2024-11-25T07:30:32,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/java.io.tmpdir in system properties and HBase conf 2024-11-25T07:30:32,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:30:32,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T07:30:32,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T07:30:32,202 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:30:32,279 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:30:32,283 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:30:32,285 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:30:32,285 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:30:32,285 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:30:32,285 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:30:32,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e3b8424{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:30:32,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25f63c50{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:30:32,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@33255ae1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/java.io.tmpdir/jetty-localhost-41963-hadoop-hdfs-3_4_1-tests_jar-_-any-7005607204420494444/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:30:32,401 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b6caabd{HTTP/1.1, (http/1.1)}{localhost:41963} 2024-11-25T07:30:32,401 INFO [Time-limited test {}] server.Server(415): Started @152248ms 2024-11-25T07:30:32,414 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:30:32,489 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:30:32,492 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:30:32,493 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:30:32,493 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:30:32,493 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:30:32,493 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57204301{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:30:32,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19160285{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:30:32,608 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70df7796{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/java.io.tmpdir/jetty-localhost-41703-hadoop-hdfs-3_4_1-tests_jar-_-any-7783958043560344746/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:32,608 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2efee71d{HTTP/1.1, (http/1.1)}{localhost:41703} 2024-11-25T07:30:32,609 INFO [Time-limited test {}] server.Server(415): Started @152456ms 2024-11-25T07:30:32,610 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:30:32,640 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:30:32,643 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:30:32,644 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:30:32,644 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:30:32,644 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:30:32,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@123783d2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:30:32,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47ffea33{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:30:32,707 WARN [Thread-1191 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data1/current/BP-704301945-172.17.0.2-1732519832220/current, will proceed with Du for space computation calculation, 2024-11-25T07:30:32,707 WARN [Thread-1192 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data2/current/BP-704301945-172.17.0.2-1732519832220/current, will proceed with Du for space computation calculation, 2024-11-25T07:30:32,724 WARN [Thread-1170 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T07:30:32,726 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe438b628c271d389 with lease ID 0xc1c95164e88c7b4c: Processing first storage report for DS-70a82119-7acc-4462-9ebd-5d7d216bf31a from datanode DatanodeRegistration(127.0.0.1:39907, datanodeUuid=b944ae9b-1e3c-4ad6-89ea-1fd1dbc1c46c, infoPort=34377, infoSecurePort=0, ipcPort=46267, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220) 2024-11-25T07:30:32,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe438b628c271d389 with lease ID 0xc1c95164e88c7b4c: from storage DS-70a82119-7acc-4462-9ebd-5d7d216bf31a node DatanodeRegistration(127.0.0.1:39907, datanodeUuid=b944ae9b-1e3c-4ad6-89ea-1fd1dbc1c46c, infoPort=34377, infoSecurePort=0, ipcPort=46267, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:30:32,726 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe438b628c271d389 with lease ID 0xc1c95164e88c7b4c: Processing first storage report for DS-9139ed92-a474-42a5-bdb4-26643ed67ce2 from datanode DatanodeRegistration(127.0.0.1:39907, datanodeUuid=b944ae9b-1e3c-4ad6-89ea-1fd1dbc1c46c, infoPort=34377, infoSecurePort=0, ipcPort=46267, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220) 2024-11-25T07:30:32,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe438b628c271d389 with lease ID 0xc1c95164e88c7b4c: from storage DS-9139ed92-a474-42a5-bdb4-26643ed67ce2 node DatanodeRegistration(127.0.0.1:39907, datanodeUuid=b944ae9b-1e3c-4ad6-89ea-1fd1dbc1c46c, infoPort=34377, infoSecurePort=0, ipcPort=46267, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:30:32,764 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3127d0f5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/java.io.tmpdir/jetty-localhost-41935-hadoop-hdfs-3_4_1-tests_jar-_-any-16051105832083173559/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:32,764 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@16178224{HTTP/1.1, (http/1.1)}{localhost:41935} 2024-11-25T07:30:32,764 INFO [Time-limited test {}] server.Server(415): Started @152611ms 2024-11-25T07:30:32,766 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:30:32,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:32,853 WARN [Thread-1217 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data3/current/BP-704301945-172.17.0.2-1732519832220/current, will proceed with Du for space computation calculation, 2024-11-25T07:30:32,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:32,853 WARN [Thread-1218 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data4/current/BP-704301945-172.17.0.2-1732519832220/current, will proceed with Du for space computation calculation, 2024-11-25T07:30:32,869 WARN [Thread-1206 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T07:30:32,871 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x282795c5dca48472 with lease ID 0xc1c95164e88c7b4d: Processing first storage report for DS-aba97906-0443-4941-9a40-b577bdb173f2 from datanode DatanodeRegistration(127.0.0.1:40409, datanodeUuid=716db90c-d81d-4ded-bc47-0f24b99e4835, infoPort=33329, infoSecurePort=0, ipcPort=37479, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220) 2024-11-25T07:30:32,872 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x282795c5dca48472 with lease ID 0xc1c95164e88c7b4d: from storage DS-aba97906-0443-4941-9a40-b577bdb173f2 node DatanodeRegistration(127.0.0.1:40409, datanodeUuid=716db90c-d81d-4ded-bc47-0f24b99e4835, infoPort=33329, infoSecurePort=0, ipcPort=37479, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:30:32,872 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x282795c5dca48472 with lease ID 0xc1c95164e88c7b4d: Processing first storage report for DS-7a834788-9021-4f4e-a348-f8093ff6eb17 from datanode DatanodeRegistration(127.0.0.1:40409, datanodeUuid=716db90c-d81d-4ded-bc47-0f24b99e4835, infoPort=33329, infoSecurePort=0, ipcPort=37479, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220) 2024-11-25T07:30:32,872 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x282795c5dca48472 with lease ID 0xc1c95164e88c7b4d: from storage DS-7a834788-9021-4f4e-a348-f8093ff6eb17 node DatanodeRegistration(127.0.0.1:40409, datanodeUuid=716db90c-d81d-4ded-bc47-0f24b99e4835, infoPort=33329, infoSecurePort=0, ipcPort=37479, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:30:32,890 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025 2024-11-25T07:30:32,892 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/zookeeper_0, clientPort=60916, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T07:30:32,893 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60916 2024-11-25T07:30:32,894 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:30:32,895 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:30:32,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39907 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:30:32,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40409 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:30:32,905 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74 with version=8 2024-11-25T07:30:32,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/hbase-staging 2024-11-25T07:30:32,906 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:30:32,906 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:30:32,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:30:32,907 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:30:32,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:30:32,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:30:32,907 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T07:30:32,907 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:30:32,908 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43681 2024-11-25T07:30:32,909 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43681 connecting to ZooKeeper ensemble=127.0.0.1:60916 2024-11-25T07:30:32,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:436810x0, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:30:32,919 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43681-0x1014e0897cb0000 connected 2024-11-25T07:30:32,940 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:30:32,941 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:30:32,943 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:30:32,943 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74, hbase.cluster.distributed=false 2024-11-25T07:30:32,945 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:30:32,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43681 2024-11-25T07:30:32,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43681 2024-11-25T07:30:32,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43681 2024-11-25T07:30:32,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43681 2024-11-25T07:30:32,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43681 2024-11-25T07:30:32,962 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:30:32,962 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:30:32,962 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:30:32,962 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:30:32,962 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:30:32,962 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:30:32,962 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T07:30:32,962 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:30:32,963 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42547 2024-11-25T07:30:32,964 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42547 connecting to ZooKeeper ensemble=127.0.0.1:60916 2024-11-25T07:30:32,965 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:30:32,967 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:30:32,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425470x0, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:30:32,971 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:425470x0, quorum=127.0.0.1:60916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:30:32,971 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42547-0x1014e0897cb0001 connected 2024-11-25T07:30:32,971 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T07:30:32,972 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T07:30:32,973 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T07:30:32,974 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:30:32,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42547 2024-11-25T07:30:32,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42547 2024-11-25T07:30:32,974 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42547 2024-11-25T07:30:32,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42547 2024-11-25T07:30:32,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42547 2024-11-25T07:30:32,986 DEBUG [M:0;5eb3d201e8c9:43681 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5eb3d201e8c9:43681 2024-11-25T07:30:32,986 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5eb3d201e8c9,43681,1732519832906 2024-11-25T07:30:32,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:30:32,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:30:32,989 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5eb3d201e8c9,43681,1732519832906 2024-11-25T07:30:32,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:32,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T07:30:32,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:32,992 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T07:30:32,992 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5eb3d201e8c9,43681,1732519832906 from backup master directory 2024-11-25T07:30:32,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5eb3d201e8c9,43681,1732519832906 2024-11-25T07:30:32,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:30:32,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:30:32,994 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T07:30:32,994 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5eb3d201e8c9,43681,1732519832906 2024-11-25T07:30:33,001 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/hbase.id] with ID: 276ce43b-3aca-482f-aad7-07e0228cdba3 2024-11-25T07:30:33,001 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/.tmp/hbase.id 2024-11-25T07:30:33,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40409 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:30:33,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39907 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:30:33,009 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/.tmp/hbase.id]:[hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/hbase.id] 2024-11-25T07:30:33,020 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:30:33,020 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T07:30:33,021 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
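The FSUtils entries above describe writing the new cluster ID to a temporary file and then moving it to its target location, so readers never observe a half-written hbase.id. A simplified sketch of that write-then-rename pattern using the public Hadoop FileSystem API; the paths and the plain-text payload are illustrative assumptions, not the actual FSUtils implementation, which has its own on-disk format:

    import java.net.URI;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode and root dir; the log above targets
        // hdfs://localhost:46285/user/jenkins/test-data/... instead.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46285"), conf);
        Path tmp = new Path("/user/jenkins/example-root/.tmp/hbase.id");
        Path dst = new Path("/user/jenkins/example-root/hbase.id");

        // Write the ID to a temporary location first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("276ce43b-3aca-482f-aad7-07e0228cdba3"
              .getBytes(StandardCharsets.UTF_8));
        }
        // ...then rename it into place so the visible file is always complete.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }
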
2024-11-25T07:30:33,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:33,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:33,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40409 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:30:33,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39907 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:30:33,032 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:30:33,033 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T07:30:33,033 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:30:33,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39907 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:30:33,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40409 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:30:33,041 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store 2024-11-25T07:30:33,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39907 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:30:33,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40409 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:30:33,048 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:30:33,048 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:30:33,048 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:30:33,048 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:30:33,048 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:30:33,048 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:30:33,048 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
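The "Create or load local region" entry above spells out the master:store table descriptor with its four column families (info, proc, rs, state) and their per-family settings. That region is created internally by the master rather than by client code, but the same attributes can be expressed through the public descriptor builders; a sketch with a hypothetical table name and only two of the families, matching the settings the log reports:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // Hypothetical table name; 'master:store' itself is a master-local
        // region and is not created through the client API.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_store"))
            // 'info' family: 3 versions, in-memory, ROWCOL bloom,
            // ROW_INDEX_V1 encoding, 8 KB blocks (as in the log).
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc' family: single version, ROW bloom, default 64 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .build())
            .build();
        System.out.println(td);
      }
    }
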
2024-11-25T07:30:33,049 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519833048Disabling compacts and flushes for region at 1732519833048Disabling writes for close at 1732519833048Writing region close event to WAL at 1732519833048Closed at 1732519833048 2024-11-25T07:30:33,049 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/.initializing 2024-11-25T07:30:33,050 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906 2024-11-25T07:30:33,052 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C43681%2C1732519832906, suffix=, logDir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906, archiveDir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/oldWALs, maxLogs=10 2024-11-25T07:30:33,053 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 2024-11-25T07:30:33,058 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 2024-11-25T07:30:33,061 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34377:34377),(127.0.0.1/127.0.0.1:33329:33329)] 2024-11-25T07:30:33,063 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:30:33,063 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:30:33,063 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,063 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T07:30:33,066 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:30:33,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T07:30:33,068 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:30:33,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T07:30:33,070 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:30:33,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T07:30:33,071 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:30:33,072 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,073 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,073 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,074 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,074 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,075 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T07:30:33,076 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:30:33,079 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:30:33,080 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748979, jitterRate=-0.04762503504753113}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T07:30:33,080 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732519833063Initializing all the Stores at 1732519833064 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519833064Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519833064Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519833064Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519833064Cleaning up temporary data from old regions at 1732519833074 (+10 ms)Region opened successfully at 1732519833080 (+6 ms) 2024-11-25T07:30:33,081 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T07:30:33,084 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13260395, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:30:33,085 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T07:30:33,085 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T07:30:33,085 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T07:30:33,086 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T07:30:33,086 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T07:30:33,086 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T07:30:33,087 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T07:30:33,090 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T07:30:33,091 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T07:30:33,092 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T07:30:33,092 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T07:30:33,094 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T07:30:33,096 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T07:30:33,097 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T07:30:33,098 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T07:30:33,099 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T07:30:33,100 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T07:30:33,101 DEBUG 
[master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T07:30:33,102 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T07:30:33,103 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T07:30:33,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:30:33,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:30:33,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:33,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:33,107 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5eb3d201e8c9,43681,1732519832906, sessionid=0x1014e0897cb0000, setting cluster-up flag (Was=false) 2024-11-25T07:30:33,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:33,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:33,116 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T07:30:33,117 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,43681,1732519832906 2024-11-25T07:30:33,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:33,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:33,127 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T07:30:33,129 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,43681,1732519832906 2024-11-25T07:30:33,130 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T07:30:33,132 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T07:30:33,133 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T07:30:33,133 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T07:30:33,133 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5eb3d201e8c9,43681,1732519832906 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T07:30:33,134 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:30:33,134 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:30:33,134 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:30:33,134 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:30:33,134 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5eb3d201e8c9:0, corePoolSize=10, maxPoolSize=10 2024-11-25T07:30:33,134 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,135 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:30:33,135 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T07:30:33,136 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732519863136 2024-11-25T07:30:33,136 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T07:30:33,136 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T07:30:33,136 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T07:30:33,136 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T07:30:33,136 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T07:30:33,136 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T07:30:33,136 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,137 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:30:33,137 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T07:30:33,137 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T07:30:33,137 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T07:30:33,137 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T07:30:33,137 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T07:30:33,137 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T07:30:33,138 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,138 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T07:30:33,140 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519833137,5,FailOnTimeoutGroup] 2024-11-25T07:30:33,143 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519833140,5,FailOnTimeoutGroup] 2024-11-25T07:30:33,143 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,143 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T07:30:33,143 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,143 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
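
The FSTableDescriptors entry above records the hbase:meta table descriptor, with each column family carrying attributes such as VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY and BLOCKSIZE. As a hedged illustration only (this is not how the master builds meta internally, and the class and table names here are hypothetical), an 'info'-style family with the same attributes can be described through the public client API roughly like this:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static TableDescriptor build() {
            // Mirrors the attributes printed in the log for the 'info' family:
            // VERSIONS=3, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1,
            // IN_MEMORY=true, BLOCKSIZE=8192.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
            // "example" is a hypothetical table name; hbase:meta itself is created by the master.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(info)
                .build();
        }
    }
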
2024-11-25T07:30:33,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39907 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:30:33,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40409 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:30:33,148 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T07:30:33,148 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74 2024-11-25T07:30:33,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40409 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:30:33,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39907 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:30:33,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:30:33,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:30:33,158 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:30:33,158 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:30:33,159 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:30:33,160 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:30:33,160 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,160 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:30:33,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:30:33,162 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:30:33,162 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:30:33,163 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:30:33,163 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:30:33,164 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:30:33,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:30:33,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740 2024-11-25T07:30:33,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740 2024-11-25T07:30:33,166 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:30:33,166 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:30:33,167 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
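
The CompactionConfiguration entries above echo the effective compaction tuning for each store: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0, and a weekly major-compaction period with 0.5 jitter. As a hedged sketch of the configuration keys that normally feed these values (the test may set them differently; the class name below is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static Configuration tune() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
            conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // major period, 7 days
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major jitter
            return conf;
        }
    }

The throttle point of 2684354560 bytes reported above is consistent with the usual default of 2 x maxFilesToCompact x the 128 MB memstore flush size, rather than an explicit override.
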
2024-11-25T07:30:33,168 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:30:33,170 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:30:33,170 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=837217, jitterRate=0.06457668542861938}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:30:33,171 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732519833155Initializing all the Stores at 1732519833156 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519833156Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519833156Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519833156Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519833156Cleaning up temporary data from old regions at 1732519833166 (+10 ms)Region opened successfully at 1732519833171 (+5 ms) 2024-11-25T07:30:33,171 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:30:33,171 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:30:33,171 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:30:33,171 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:30:33,171 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:30:33,171 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:30:33,171 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519833171Disabling compacts and flushes for region at 1732519833171Disabling writes for close at 1732519833171Writing region close 
event to WAL at 1732519833171Closed at 1732519833171 2024-11-25T07:30:33,173 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:30:33,173 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T07:30:33,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T07:30:33,174 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:30:33,175 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T07:30:33,177 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(746): ClusterId : 276ce43b-3aca-482f-aad7-07e0228cdba3 2024-11-25T07:30:33,177 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T07:30:33,179 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T07:30:33,179 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T07:30:33,183 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T07:30:33,183 DEBUG [RS:0;5eb3d201e8c9:42547 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4955920a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:30:33,195 DEBUG [RS:0;5eb3d201e8c9:42547 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5eb3d201e8c9:42547 2024-11-25T07:30:33,195 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T07:30:33,195 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T07:30:33,195 DEBUG [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(832): About to register with Master. 
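
The "Opened ... next sequenceid=2" entries above also print the region split policy in effect (SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy and ConstantSizeRegionSplitPolicy, with a jittered desiredMaxFileSize) and the FlushLargeStoresPolicy lower bound. As a hedged sketch of the keys that usually select and size the split policy (the very small desiredMaxFileSize values in this log come from test overrides, not defaults; the class name below is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SplitPolicySketch {
        public static Configuration tune() {
            Configuration conf = HBaseConfiguration.create();
            // Default split policy in recent HBase versions, matching the log output.
            conf.set("hbase.regionserver.region.split.policy",
                "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
            // ConstantSizeRegionSplitPolicy derives desiredMaxFileSize from this cap and
            // then applies the jitterRate seen in the log; 10 GB is the production default.
            conf.setLong("hbase.hregion.max.filesize", 10L * 1024 * 1024 * 1024);
            return conf;
        }
    }
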
2024-11-25T07:30:33,196 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(2659): reportForDuty to master=5eb3d201e8c9,43681,1732519832906 with port=42547, startcode=1732519832962 2024-11-25T07:30:33,196 DEBUG [RS:0;5eb3d201e8c9:42547 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T07:30:33,198 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56099, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T07:30:33,198 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43681 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:33,198 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43681 {}] master.ServerManager(517): Registering regionserver=5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:33,200 DEBUG [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74 2024-11-25T07:30:33,200 DEBUG [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46285 2024-11-25T07:30:33,200 DEBUG [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T07:30:33,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:30:33,202 DEBUG [RS:0;5eb3d201e8c9:42547 {}] zookeeper.ZKUtil(111): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:33,202 WARN [RS:0;5eb3d201e8c9:42547 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T07:30:33,202 INFO [RS:0;5eb3d201e8c9:42547 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:30:33,202 DEBUG [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:33,202 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5eb3d201e8c9,42547,1732519832962] 2024-11-25T07:30:33,205 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T07:30:33,208 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T07:30:33,208 INFO [RS:0;5eb3d201e8c9:42547 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T07:30:33,208 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
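
The MemStoreFlusher and PressureAwareCompactionThroughputController lines above report a global memstore limit of 880 M with a low-water mark of 836 M, and compaction throughput bounds of 100 MB/s and 50 MB/s. A hedged sketch of the keys that normally produce those numbers (the fractions shown are the documented defaults, and the class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndThroughputSketch {
        public static Configuration tune() {
            Configuration conf = HBaseConfiguration.create();
            // Global memstore limit as a fraction of heap; 836 M / 880 M in the log
            // matches a low-water mark at 95% of the limit.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            // PressureAwareCompactionThroughputController bounds, in bytes per second.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            return conf;
        }
    }
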
2024-11-25T07:30:33,208 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T07:30:33,209 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T07:30:33,209 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,209 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,209 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,209 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,209 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,209 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,209 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:30:33,210 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,210 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,210 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,210 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,210 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,210 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:30:33,210 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:30:33,210 DEBUG [RS:0;5eb3d201e8c9:42547 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:30:33,210 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
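
The ExecutorService lines above show the regionserver starting its event-handler pools (RS_OPEN_REGION, RS_CLOSE_REGION, and so on) with single-thread pools in this test. As a hedged sketch only, these pools are commonly sized through keys of the following form; verify the exact names against the HBase version in use (the class name below is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ExecutorPoolSketch {
        public static Configuration tune() {
            Configuration conf = HBaseConfiguration.create();
            // Thread counts for the region open/close executors; production defaults
            // are larger than the corePoolSize=1 pools this test starts with.
            conf.setInt("hbase.regionserver.executor.openregion.threads", 3);
            conf.setInt("hbase.regionserver.executor.closeregion.threads", 3);
            return conf;
        }
    }
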
2024-11-25T07:30:33,210 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,210 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,210 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,210 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,210 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,42547,1732519832962-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:30:33,225 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T07:30:33,225 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,42547,1732519832962-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,226 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,226 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.Replication(171): 5eb3d201e8c9,42547,1732519832962 started 2024-11-25T07:30:33,240 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,240 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(1482): Serving as 5eb3d201e8c9,42547,1732519832962, RpcServer on 5eb3d201e8c9/172.17.0.2:42547, sessionid=0x1014e0897cb0001 2024-11-25T07:30:33,240 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T07:30:33,240 DEBUG [RS:0;5eb3d201e8c9:42547 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:33,240 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,42547,1732519832962' 2024-11-25T07:30:33,240 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T07:30:33,241 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T07:30:33,241 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T07:30:33,241 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T07:30:33,241 DEBUG [RS:0;5eb3d201e8c9:42547 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:33,241 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,42547,1732519832962' 2024-11-25T07:30:33,241 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T07:30:33,241 DEBUG 
[RS:0;5eb3d201e8c9:42547 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T07:30:33,242 DEBUG [RS:0;5eb3d201e8c9:42547 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T07:30:33,242 INFO [RS:0;5eb3d201e8c9:42547 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T07:30:33,242 INFO [RS:0;5eb3d201e8c9:42547 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T07:30:33,325 WARN [5eb3d201e8c9:43681 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T07:30:33,344 INFO [RS:0;5eb3d201e8c9:42547 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C42547%2C1732519832962, suffix=, logDir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962, archiveDir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/oldWALs, maxLogs=32 2024-11-25T07:30:33,344 INFO [RS:0;5eb3d201e8c9:42547 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 2024-11-25T07:30:33,350 INFO [RS:0;5eb3d201e8c9:42547 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 2024-11-25T07:30:33,352 DEBUG [RS:0;5eb3d201e8c9:42547 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33329:33329),(127.0.0.1/127.0.0.1:34377:34377)] 2024-11-25T07:30:33,576 DEBUG [5eb3d201e8c9:43681 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T07:30:33,576 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:33,578 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,42547,1732519832962, state=OPENING 2024-11-25T07:30:33,579 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T07:30:33,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:33,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:33,581 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:30:33,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:30:33,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:30:33,581 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,42547,1732519832962}] 2024-11-25T07:30:33,734 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T07:30:33,736 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53579, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T07:30:33,739 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T07:30:33,740 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:30:33,741 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C42547%2C1732519832962.meta, suffix=.meta, logDir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962, archiveDir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/oldWALs, maxLogs=32 2024-11-25T07:30:33,742 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519833742.meta 2024-11-25T07:30:33,747 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519833742.meta 2024-11-25T07:30:33,748 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34377:34377),(127.0.0.1/127.0.0.1:33329:33329)] 2024-11-25T07:30:33,748 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:30:33,749 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T07:30:33,749 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T07:30:33,749 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
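
The AbstractFSWAL entries above show the WAL being created through FSHLogProvider with blocksize=256 MB, rollsize=128 MB and maxLogs=32. A hedged sketch of the keys that typically drive that configuration (the class name is hypothetical, and the blocksize here is normally derived from the filesystem block size rather than set explicitly):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
        public static Configuration tune() {
            Configuration conf = HBaseConfiguration.create();
            // FSHLogProvider corresponds to the "filesystem" WAL provider.
            conf.set("hbase.wal.provider", "filesystem");
            // blocksize=256 MB with rollsize=128 MB matches a 0.5 roll multiplier.
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            conf.setInt("hbase.regionserver.maxlogs", 32);
            return conf;
        }
    }
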
2024-11-25T07:30:33,749 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T07:30:33,749 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:30:33,749 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T07:30:33,749 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T07:30:33,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:30:33,751 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:30:33,751 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:30:33,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:30:33,752 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:30:33,753 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,753 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:30:33,753 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:30:33,754 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:30:33,754 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:30:33,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:30:33,755 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:30:33,755 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,755 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-25T07:30:33,755 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:30:33,756 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740 2024-11-25T07:30:33,757 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740 2024-11-25T07:30:33,758 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:30:33,758 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:30:33,759 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T07:30:33,760 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:30:33,761 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812726, jitterRate=0.0334351509809494}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:30:33,761 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T07:30:33,762 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732519833749Writing region info on filesystem at 1732519833749Initializing all the Stores at 1732519833750 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519833750Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519833750Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519833750Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519833750Cleaning up temporary data from old regions at 1732519833759 (+9 ms)Running coprocessor post-open hooks at 1732519833761 (+2 ms)Region opened successfully at 1732519833762 (+1 ms) 2024-11-25T07:30:33,763 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732519833734 2024-11-25T07:30:33,765 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T07:30:33,765 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T07:30:33,766 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:33,768 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,42547,1732519832962, state=OPEN 2024-11-25T07:30:33,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:30:33,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:30:33,773 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:30:33,773 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:33,773 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:30:33,775 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T07:30:33,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,42547,1732519832962 in 192 msec 2024-11-25T07:30:33,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T07:30:33,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-11-25T07:30:33,779 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:30:33,779 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T07:30:33,780 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:30:33,780 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,42547,1732519832962, seqNum=-1] 2024-11-25T07:30:33,781 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:30:33,782 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46847, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:30:33,787 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 654 msec 2024-11-25T07:30:33,787 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732519833787, completionTime=-1 2024-11-25T07:30:33,787 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T07:30:33,787 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T07:30:33,788 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T07:30:33,788 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732519893788 2024-11-25T07:30:33,788 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732519953788 2024-11-25T07:30:33,788 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-25T07:30:33,789 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,43681,1732519832906-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,789 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,43681,1732519832906-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,789 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,43681,1732519832906-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,789 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5eb3d201e8c9:43681, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T07:30:33,789 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,789 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,791 DEBUG [master/5eb3d201e8c9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T07:30:33,792 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.798sec 2024-11-25T07:30:33,792 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T07:30:33,792 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T07:30:33,792 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T07:30:33,792 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T07:30:33,792 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T07:30:33,793 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,43681,1732519832906-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:30:33,793 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,43681,1732519832906-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T07:30:33,795 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T07:30:33,795 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T07:30:33,795 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,43681,1732519832906-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:30:33,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:33,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:33,877 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ad6192b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:30:33,877 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5eb3d201e8c9,43681,-1 for getting cluster id 2024-11-25T07:30:33,877 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T07:30:33,879 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '276ce43b-3aca-482f-aad7-07e0228cdba3' 2024-11-25T07:30:33,879 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T07:30:33,879 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "276ce43b-3aca-482f-aad7-07e0228cdba3" 2024-11-25T07:30:33,880 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7058ad72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:30:33,880 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5eb3d201e8c9,43681,-1] 2024-11-25T07:30:33,880 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T07:30:33,880 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:33,882 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57148, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T07:30:33,882 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30b55012, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:30:33,883 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:30:33,883 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,42547,1732519832962, seqNum=-1] 2024-11-25T07:30:33,884 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:30:33,885 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39710, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:30:33,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5eb3d201e8c9,43681,1732519832906 2024-11-25T07:30:33,887 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:30:33,890 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T07:30:33,890 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-25T07:30:33,890 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-25T07:30:33,890 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T07:30:33,891 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 5eb3d201e8c9,43681,1732519832906 2024-11-25T07:30:33,891 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@326b4795 2024-11-25T07:30:33,891 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T07:30:33,892 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57164, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T07:30:33,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43681 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T07:30:33,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43681 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
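The two TableDescriptorChecker warnings above come from deliberately tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) values in the table descriptor, chosen so the test forces frequent flushes and rolls. A minimal sketch of building an equivalent descriptor with the public client API follows; the builder calls are standard HBase client API, but this is not necessarily the code path the test itself uses, and a cluster with default sanity checks (hbase.table.sanity.checks) may reject values this small instead of only warning.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSmallFlushTable {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptor desc = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
                // Values copied from the warnings above; both are far below the
                // production defaults, which is why TableDescriptorChecker complains.
                .setMaxFileSize(786432)       // MAX_FILESIZE, ~768 KB
                .setMemStoreFlushSize(8192)   // MEMSTORE_FLUSHSIZE, 8 KB
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)        // VERSIONS => '1' in the create call above
                    .build())
                .build();
            admin.createTable(desc);
        }
    }
}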
2024-11-25T07:30:33,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43681 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:30:33,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43681 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T07:30:33,896 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T07:30:33,896 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:33,896 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43681 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-25T07:30:33,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43681 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T07:30:33,897 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T07:30:33,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40409 is added to blk_1073741835_1011 (size=395) 2024-11-25T07:30:33,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39907 is added to blk_1073741835_1011 (size=395) 2024-11-25T07:30:33,906 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0e6552fb840445ac7178c07a96e4625c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74 2024-11-25T07:30:33,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40409 is added to blk_1073741836_1012 (size=78) 2024-11-25T07:30:33,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39907 is added to blk_1073741836_1012 (size=78) 2024-11-25T07:30:33,912 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:30:33,912 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 0e6552fb840445ac7178c07a96e4625c, disabling compactions & flushes 2024-11-25T07:30:33,912 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 2024-11-25T07:30:33,912 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 2024-11-25T07:30:33,912 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. after waiting 0 ms 2024-11-25T07:30:33,912 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 2024-11-25T07:30:33,913 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 2024-11-25T07:30:33,913 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0e6552fb840445ac7178c07a96e4625c: Waiting for close lock at 1732519833912Disabling compacts and flushes for region at 1732519833912Disabling writes for close at 1732519833912Writing region close event to WAL at 1732519833913 (+1 ms)Closed at 1732519833913 2024-11-25T07:30:33,914 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T07:30:33,915 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732519833914"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732519833914"}]},"ts":"1732519833914"} 2024-11-25T07:30:33,917 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-25T07:30:33,918 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T07:30:33,918 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732519833918"}]},"ts":"1732519833918"} 2024-11-25T07:30:33,920 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-25T07:30:33,921 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0e6552fb840445ac7178c07a96e4625c, ASSIGN}] 2024-11-25T07:30:33,922 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0e6552fb840445ac7178c07a96e4625c, ASSIGN 2024-11-25T07:30:33,923 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0e6552fb840445ac7178c07a96e4625c, ASSIGN; state=OFFLINE, location=5eb3d201e8c9,42547,1732519832962; forceNewPlan=false, retain=false 2024-11-25T07:30:34,074 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0e6552fb840445ac7178c07a96e4625c, regionState=OPENING, regionLocation=5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:34,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0e6552fb840445ac7178c07a96e4625c, ASSIGN because future has completed 2024-11-25T07:30:34,077 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e6552fb840445ac7178c07a96e4625c, server=5eb3d201e8c9,42547,1732519832962}] 2024-11-25T07:30:34,233 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 
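Once the ASSIGN subprocedure dispatched above completes and the region is opened on 5eb3d201e8c9,42547, the new table is reachable through the normal client API. The sketch below is an assumed standalone illustration of writing one cell to the 'info' family and reading it back; the table and family names are taken from the records above, while the row key and value are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteAfterCreate {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name)) {
            // Write one cell into the 'info' family defined by the descriptor above.
            Put put = new Put(Bytes.toBytes("row0"));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
            table.put(put);

            // Read the cell back from the region assigned above.
            Result result = table.get(new Get(Bytes.toBytes("row0")));
            System.out.println(Bytes.toString(
                result.getValue(Bytes.toBytes("info"), Bytes.toBytes("q"))));
        }
    }
}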
2024-11-25T07:30:34,234 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0e6552fb840445ac7178c07a96e4625c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:30:34,234 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:34,234 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:30:34,234 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:34,234 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:34,235 INFO [StoreOpener-0e6552fb840445ac7178c07a96e4625c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:34,237 INFO [StoreOpener-0e6552fb840445ac7178c07a96e4625c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0e6552fb840445ac7178c07a96e4625c columnFamilyName info 2024-11-25T07:30:34,237 DEBUG [StoreOpener-0e6552fb840445ac7178c07a96e4625c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:30:34,237 INFO [StoreOpener-0e6552fb840445ac7178c07a96e4625c-1 {}] regionserver.HStore(327): Store=0e6552fb840445ac7178c07a96e4625c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:30:34,237 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:34,238 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/default/TestLogRolling-testLogRollOnPipelineRestart/0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:34,238 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/default/TestLogRolling-testLogRollOnPipelineRestart/0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:34,239 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:34,239 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:34,240 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:34,242 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/default/TestLogRolling-testLogRollOnPipelineRestart/0e6552fb840445ac7178c07a96e4625c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:30:34,242 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0e6552fb840445ac7178c07a96e4625c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847592, jitterRate=0.0777692198753357}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T07:30:34,243 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:34,243 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0e6552fb840445ac7178c07a96e4625c: Running coprocessor pre-open hook at 1732519834234Writing region info on filesystem at 1732519834234Initializing all the Stores at 1732519834235 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519834235Cleaning up temporary data from old regions at 1732519834239 (+4 ms)Running coprocessor post-open hooks at 1732519834243 (+4 ms)Region opened successfully at 1732519834243 2024-11-25T07:30:34,244 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c., pid=6, masterSystemTime=1732519834230 2024-11-25T07:30:34,246 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 2024-11-25T07:30:34,247 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 2024-11-25T07:30:34,247 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0e6552fb840445ac7178c07a96e4625c, regionState=OPEN, openSeqNum=2, regionLocation=5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:34,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e6552fb840445ac7178c07a96e4625c, server=5eb3d201e8c9,42547,1732519832962 because future has completed 2024-11-25T07:30:34,253 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T07:30:34,253 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0e6552fb840445ac7178c07a96e4625c, server=5eb3d201e8c9,42547,1732519832962 in 174 msec 2024-11-25T07:30:34,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T07:30:34,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0e6552fb840445ac7178c07a96e4625c, ASSIGN in 332 msec 2024-11-25T07:30:34,257 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T07:30:34,257 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732519834257"}]},"ts":"1732519834257"} 2024-11-25T07:30:34,259 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-25T07:30:34,260 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T07:30:34,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 366 msec 2024-11-25T07:30:34,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:34,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:35,761 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T07:30:35,762 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T07:30:35,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T07:30:35,762 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-25T07:30:35,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:30:35,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T07:30:35,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:35,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:36,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:36,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:37,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:37,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:38,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:38,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:39,251 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T07:30:39,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:39,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:39,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:39,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:39,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:39,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:39,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:39,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:39,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:39,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:30:39,283 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T07:30:39,284 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-25T07:30:39,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:39,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:40,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:40,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:41,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:41,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:42,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:42,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:43,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:43,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:43,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43681 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T07:30:43,937 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-25T07:30:43,937 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-25T07:30:43,940 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T07:30:43,940 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 2024-11-25T07:30:43,943 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c., hostname=5eb3d201e8c9,42547,1732519832962, seqNum=2] 2024-11-25T07:30:44,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:44,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:45,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:45,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:45,946 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 2024-11-25T07:30:45,947 WARN [ResponseProcessor for block BP-704301945-172.17.0.2-1732519832220:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-704301945-172.17.0.2-1732519832220:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:45,947 WARN [ResponseProcessor for block BP-704301945-172.17.0.2-1732519832220:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-704301945-172.17.0.2-1732519832220:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-704301945-172.17.0.2-1732519832220:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:40409,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:45,947 WARN [ResponseProcessor for block BP-704301945-172.17.0.2-1732519832220:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-704301945-172.17.0.2-1732519832220:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-704301945-172.17.0.2-1732519832220:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:40409,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:45,947 WARN [DataStreamer for file /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 block BP-704301945-172.17.0.2-1732519832220:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-704301945-172.17.0.2-1732519832220:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40409,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK], DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40409,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK]) is bad. 
2024-11-25T07:30:45,947 WARN [DataStreamer for file /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519833742.meta block BP-704301945-172.17.0.2-1732519832220:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-704301945-172.17.0.2-1732519832220:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK], DatanodeInfoWithStorage[127.0.0.1:40409,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40409,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK]) is bad. 2024-11-25T07:30:45,947 WARN [DataStreamer for file /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 block BP-704301945-172.17.0.2-1732519832220:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-704301945-172.17.0.2-1732519832220:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK], DatanodeInfoWithStorage[127.0.0.1:40409,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40409,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK]) is bad. 2024-11-25T07:30:45,948 WARN [PacketResponder: BP-704301945-172.17.0.2-1732519832220:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40409] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:45,948 WARN [PacketResponder: BP-704301945-172.17.0.2-1732519832220:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40409] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:45,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1899189270_22 at /127.0.0.1:52684 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52684 dst: /127.0.0.1:39907 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:45,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084514458_22 at /127.0.0.1:52716 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52716 dst: /127.0.0.1:39907 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:45,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1899189270_22 at /127.0.0.1:42052 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40409:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42052 dst: /127.0.0.1:40409 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:45,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084514458_22 at /127.0.0.1:52704 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52704 dst: /127.0.0.1:39907 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:45,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084514458_22 at /127.0.0.1:42094 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40409:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42094 dst: /127.0.0.1:40409 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:45,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084514458_22 at /127.0.0.1:42086 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40409:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42086 dst: /127.0.0.1:40409 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:45,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3127d0f5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:45,951 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@16178224{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:30:45,951 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:30:45,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47ffea33{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:30:45,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@123783d2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,STOPPED} 2024-11-25T07:30:45,953 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:30:45,953 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:30:45,953 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-704301945-172.17.0.2-1732519832220 (Datanode Uuid 716db90c-d81d-4ded-bc47-0f24b99e4835) service to localhost/127.0.0.1:46285 2024-11-25T07:30:45,953 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:30:45,954 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data4/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:45,954 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:30:45,956 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data3/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:45,963 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:30:45,966 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:30:45,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:30:45,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:30:45,967 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:30:45,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1763b2d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:30:45,968 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3903d405{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:30:46,081 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55b48cd0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/java.io.tmpdir/jetty-localhost-45433-hadoop-hdfs-3_4_1-tests_jar-_-any-1221513310996360957/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:46,082 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@62f6e774{HTTP/1.1, 
(http/1.1)}{localhost:45433} 2024-11-25T07:30:46,082 INFO [Time-limited test {}] server.Server(415): Started @165929ms 2024-11-25T07:30:46,083 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:30:46,102 WARN [ResponseProcessor for block BP-704301945-172.17.0.2-1732519832220:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-704301945-172.17.0.2-1732519832220:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:46,102 WARN [ResponseProcessor for block BP-704301945-172.17.0.2-1732519832220:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-704301945-172.17.0.2-1732519832220:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:46,102 WARN [ResponseProcessor for block BP-704301945-172.17.0.2-1732519832220:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-704301945-172.17.0.2-1732519832220:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:46,102 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084514458_22 at /127.0.0.1:36104 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36104 dst: /127.0.0.1:39907 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:46,102 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1899189270_22 at /127.0.0.1:36124 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36124 dst: /127.0.0.1:39907 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:46,103 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084514458_22 at /127.0.0.1:36102 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36102 dst: /127.0.0.1:39907 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:46,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70df7796{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:46,106 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2efee71d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:30:46,106 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:30:46,106 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19160285{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:30:46,106 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57204301{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,STOPPED} 2024-11-25T07:30:46,107 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:30:46,107 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-704301945-172.17.0.2-1732519832220 (Datanode Uuid b944ae9b-1e3c-4ad6-89ea-1fd1dbc1c46c) service to localhost/127.0.0.1:46285 2024-11-25T07:30:46,107 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T07:30:46,107 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:30:46,108 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data1/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:46,109 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data2/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:46,109 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:30:46,122 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:30:46,125 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:30:46,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:30:46,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:30:46,125 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:30:46,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fa662a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:30:46,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e3b369c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:30:46,187 WARN [Thread-1341 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T07:30:46,190 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b9f7dec1c4b61f0 with lease ID 0xc1c95164e88c7b4e: from storage DS-aba97906-0443-4941-9a40-b577bdb173f2 node DatanodeRegistration(127.0.0.1:42791, datanodeUuid=716db90c-d81d-4ded-bc47-0f24b99e4835, infoPort=42899, infoSecurePort=0, ipcPort=46803, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:30:46,190 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b9f7dec1c4b61f0 with lease ID 0xc1c95164e88c7b4e: from storage DS-7a834788-9021-4f4e-a348-f8093ff6eb17 node DatanodeRegistration(127.0.0.1:42791, datanodeUuid=716db90c-d81d-4ded-bc47-0f24b99e4835, infoPort=42899, infoSecurePort=0, ipcPort=46803, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:30:46,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c83d523{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/java.io.tmpdir/jetty-localhost-37695-hadoop-hdfs-3_4_1-tests_jar-_-any-2882826400711028772/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:46,248 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@341e0a7a{HTTP/1.1, (http/1.1)}{localhost:37695} 2024-11-25T07:30:46,248 INFO [Time-limited test {}] server.Server(415): Started @166095ms 2024-11-25T07:30:46,250 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T07:30:46,372 WARN [Thread-1372 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T07:30:46,374 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x167e36a9a877b96b with lease ID 0xc1c95164e88c7b4f: from storage DS-70a82119-7acc-4462-9ebd-5d7d216bf31a node DatanodeRegistration(127.0.0.1:40083, datanodeUuid=b944ae9b-1e3c-4ad6-89ea-1fd1dbc1c46c, infoPort=46813, infoSecurePort=0, ipcPort=41571, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:30:46,375 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x167e36a9a877b96b with lease ID 0xc1c95164e88c7b4f: from storage DS-9139ed92-a474-42a5-bdb4-26643ed67ce2 node DatanodeRegistration(127.0.0.1:40083, datanodeUuid=b944ae9b-1e3c-4ad6-89ea-1fd1dbc1c46c, infoPort=46813, infoSecurePort=0, ipcPort=41571, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T07:30:46,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:30:46,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:47,267 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-25T07:30:47,270 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-25T07:30:47,271 ERROR [FSHLog-0-hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74-prefix:5eb3d201e8c9,42547,1732519832962 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:47,271 WARN [FSHLog-0-hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74-prefix:5eb3d201e8c9,42547,1732519832962 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:47,271 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C42547%2C1732519832962:(num 1732519833344) roll requested 2024-11-25T07:30:47,272 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 2024-11-25T07:30:47,277 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 newFile=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 2024-11-25T07:30:47,278 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:47,278 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:47,278 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:47,278 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:47,278 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:47,278 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 2024-11-25T07:30:47,279 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:47,279 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:47,279 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 2024-11-25T07:30:47,279 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42899:42899),(127.0.0.1/127.0.0.1:46813:46813)] 2024-11-25T07:30:47,279 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 is not closed yet, will try archiving it next time 2024-11-25T07:30:47,279 WARN [IPC Server handler 3 on default port 46285 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-25T07:30:47,280 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 after 1ms 2024-11-25T07:30:47,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:47,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:30:48,191 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-25T07:30:48,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:48,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:49,282 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-25T07:30:49,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:49,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:50,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:50,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:51,280 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 after 4001ms 2024-11-25T07:30:51,285 WARN [ResponseProcessor for block BP-704301945-172.17.0.2-1732519832220:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-704301945-172.17.0.2-1732519832220:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-704301945-172.17.0.2-1732519832220:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:40083,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:51,286 WARN [DataStreamer for file /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 block BP-704301945-172.17.0.2-1732519832220:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-704301945-172.17.0.2-1732519832220:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42791,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK], DatanodeInfoWithStorage[127.0.0.1:40083,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40083,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]) is bad. 
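The repeated RecoverLeaseFSUtils messages above ("Failed invocation for ...", "Recovered lease, attempt=1 ... after 4001ms") reflect a simple retry loop: ask the NameNode to recover the lease, poll whether the file is closed, pause a few seconds, and try again. The recurring "java.io.IOException: Filesystem closed" comes from that isFileClosed() poll being made through a FileSystem whose underlying DFSClient has already been closed. A minimal sketch of the retry pattern, assuming a live DistributedFileSystem handle and using illustrative names and parameters (this is not the actual HBase implementation):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative sketch only; class name, method name and retry parameters are made up.
public final class LeaseRecoverySketch {
  public static boolean recoverLease(DistributedFileSystem dfs, Path walFile,
      int maxAttempts, long pauseMs) throws IOException, InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // Ask the NameNode to start/complete lease recovery; true means the file is now closed.
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      // Poll whether an earlier recovery has finished. This is the call that keeps
      // failing with "Filesystem closed" in the warnings above, where the DFSClient
      // behind the filesystem instance has already been shut down.
      if (dfs.isFileClosed(walFile)) {
        return true;
      }
      Thread.sleep(pauseMs);  // the log shows pauses of roughly one to four seconds between attempts
    }
    return false;
  }
}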
2024-11-25T07:30:51,286 WARN [PacketResponder: BP-704301945-172.17.0.2-1732519832220:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40083] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:51,286 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084514458_22 at /127.0.0.1:54906 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42791:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54906 dst: /127.0.0.1:42791 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:51,286 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084514458_22 at /127.0.0.1:48816 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48816 dst: /127.0.0.1:40083 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T07:30:51,287 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c83d523{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:51,288 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@341e0a7a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:30:51,288 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:30:51,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e3b369c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:30:51,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fa662a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,STOPPED} 2024-11-25T07:30:51,289 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T07:30:51,289 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:30:51,289 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:30:51,289 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-704301945-172.17.0.2-1732519832220 (Datanode Uuid b944ae9b-1e3c-4ad6-89ea-1fd1dbc1c46c) service to localhost/127.0.0.1:46285 2024-11-25T07:30:51,290 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data1/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:51,290 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data2/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:51,291 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:30:51,299 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:30:51,303 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:30:51,304 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:30:51,304 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:30:51,304 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:30:51,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4307cd3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:30:51,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21404da7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:30:51,419 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3affdf32{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/java.io.tmpdir/jetty-localhost-39263-hadoop-hdfs-3_4_1-tests_jar-_-any-12267192211197907229/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:51,419 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ebbad67{HTTP/1.1, (http/1.1)}{localhost:39263} 2024-11-25T07:30:51,420 INFO [Time-limited test {}] server.Server(415): Started @171267ms 2024-11-25T07:30:51,421 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:30:51,449 WARN [ResponseProcessor for block BP-704301945-172.17.0.2-1732519832220:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-704301945-172.17.0.2-1732519832220:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:51,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1084514458_22 at /127.0.0.1:52568 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42791:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52568 dst: /127.0.0.1:42791 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:51,454 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55b48cd0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:51,455 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@62f6e774{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:30:51,455 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:30:51,455 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3903d405{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:30:51,455 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1763b2d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,STOPPED} 2024-11-25T07:30:51,456 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:30:51,456 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:30:51,456 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-704301945-172.17.0.2-1732519832220 (Datanode Uuid 716db90c-d81d-4ded-bc47-0f24b99e4835) service to localhost/127.0.0.1:46285 2024-11-25T07:30:51,456 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:30:51,458 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data3/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:51,458 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data4/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:30:51,458 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:30:51,472 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:30:51,475 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:30:51,476 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:30:51,476 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:30:51,476 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:30:51,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ae70be3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:30:51,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58c81822{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:30:51,524 WARN [Thread-1415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T07:30:51,527 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6bc9ac463250f5ba with lease ID 0xc1c95164e88c7b50: from storage DS-70a82119-7acc-4462-9ebd-5d7d216bf31a node DatanodeRegistration(127.0.0.1:33397, datanodeUuid=b944ae9b-1e3c-4ad6-89ea-1fd1dbc1c46c, infoPort=45871, infoSecurePort=0, ipcPort=41247, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T07:30:51,527 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6bc9ac463250f5ba with lease ID 0xc1c95164e88c7b50: from storage DS-9139ed92-a474-42a5-bdb4-26643ed67ce2 node DatanodeRegistration(127.0.0.1:33397, datanodeUuid=b944ae9b-1e3c-4ad6-89ea-1fd1dbc1c46c, infoPort=45871, infoSecurePort=0, ipcPort=41247, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:30:51,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c3c718b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/java.io.tmpdir/jetty-localhost-36621-hadoop-hdfs-3_4_1-tests_jar-_-any-6552566541490247311/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:30:51,599 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7df90452{HTTP/1.1, (http/1.1)}{localhost:36621} 2024-11-25T07:30:51,599 INFO [Time-limited test {}] server.Server(415): Started @171446ms 2024-11-25T07:30:51,601 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:30:51,687 WARN [Thread-1446 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T07:30:51,690 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb7a9bf3c9c6dbf6 with lease ID 0xc1c95164e88c7b51: from storage DS-aba97906-0443-4941-9a40-b577bdb173f2 node DatanodeRegistration(127.0.0.1:32883, datanodeUuid=716db90c-d81d-4ded-bc47-0f24b99e4835, infoPort=38485, infoSecurePort=0, ipcPort=36993, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:30:51,690 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb7a9bf3c9c6dbf6 with lease ID 0xc1c95164e88c7b51: from storage DS-7a834788-9021-4f4e-a348-f8093ff6eb17 node DatanodeRegistration(127.0.0.1:32883, datanodeUuid=716db90c-d81d-4ded-bc47-0f24b99e4835, infoPort=38485, infoSecurePort=0, ipcPort=36993, storageInfo=lv=-57;cid=testClusterID;nsid=1585012975;c=1732519832220), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:30:51,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:30:51,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:52,618 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-25T07:30:52,620 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-25T07:30:52,622 ERROR [FSHLog-0-hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74-prefix:5eb3d201e8c9,42547,1732519832962 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42791,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:30:52,622 WARN [FSHLog-0-hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74-prefix:5eb3d201e8c9,42547,1732519832962 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42791,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:52,622 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C42547%2C1732519832962:(num 1732519847272) roll requested 2024-11-25T07:30:52,622 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C42547%2C1732519832962.1732519852622 2024-11-25T07:30:52,627 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 newFile=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519852622 2024-11-25T07:30:52,628 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:52,628 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:52,628 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:52,628 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:52,628 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:52,628 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519852622 2024-11-25T07:30:52,628 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42791,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
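At this point the append has failed with "All datanodes ... are bad" and the roller has already swapped in a new writer on a fresh pipeline; in the records that follow, the old writer is closed on the background Close-WAL-Writer task, falling back to lease recovery when that close also fails. A minimal sketch of that close-with-fallback pattern, using purely illustrative names rather than the real AbstractFSWAL code:

import java.io.Closeable;
import java.io.IOException;

// Illustrative only: the roll stays non-blocking by handing the old writer to a
// background task; if the close fails (its pipeline is gone), the task falls back
// to NameNode lease recovery so the old file still gets finalized.
final class CloseOldWriterTask implements Runnable {
  private final Closeable oldWriter;
  private final Runnable leaseRecoveryFallback;

  CloseOldWriterTask(Closeable oldWriter, Runnable leaseRecoveryFallback) {
    this.oldWriter = oldWriter;
    this.leaseRecoveryFallback = leaseRecoveryFallback;
  }

  @Override
  public void run() {
    try {
      oldWriter.close();            // "Failed to write trailer" / "close old writer failed" end up here
    } catch (IOException e) {
      leaseRecoveryFallback.run();  // mirrors "Recover lease on dfs file ..." in the next records
    }
  }
}

Because the close is asynchronous, the new WAL is usable immediately while the old file is finalized later, which is why the log keeps noting "... is not closed yet, will try archiving it next time".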
2024-11-25T07:30:52,628 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42791,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:52,628 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 2024-11-25T07:30:52,629 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45871:45871),(127.0.0.1/127.0.0.1:38485:38485)] 2024-11-25T07:30:52,629 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 is not closed yet, will try archiving it next time 2024-11-25T07:30:52,629 WARN [IPC Server handler 1 on default port 46285 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-25T07:30:52,629 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 after 1ms 2024-11-25T07:30:52,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741837_1020 (size=2427) 2024-11-25T07:30:52,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:52,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:53,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:53,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:54,630 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 2024-11-25T07:30:54,636 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519852622 newFile=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 2024-11-25T07:30:54,636 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:54,636 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:54,636 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:54,636 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:54,636 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:54,637 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519852622 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 2024-11-25T07:30:54,637 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38485:38485),(127.0.0.1/127.0.0.1:45871:45871)] 2024-11-25T07:30:54,638 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 is not closed yet, will try archiving it next time 2024-11-25T07:30:54,638 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519852622 is not closed yet, will try archiving it next time 2024-11-25T07:30:54,638 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 2024-11-25T07:30:54,638 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 2024-11-25T07:30:54,638 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 after 0ms 2024-11-25T07:30:54,638 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 2024-11-25T07:30:54,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741838_1019 (size=1264) 2024-11-25T07:30:54,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741838_1019 (size=1264) 2024-11-25T07:30:54,639 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 is not closed yet, will try archiving it next time 2024-11-25T07:30:54,648 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732519834243/Put/vlen=218/seqid=0] 2024-11-25T07:30:54,648 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732519843945/Put/vlen=1045/seqid=0] 2024-11-25T07:30:54,648 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519833344 2024-11-25T07:30:54,648 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 2024-11-25T07:30:54,648 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 2024-11-25T07:30:54,649 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 after 1ms 2024-11-25T07:30:54,649 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 2024-11-25T07:30:54,652 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732519847271/Put/vlen=1045/seqid=0] 2024-11-25T07:30:54,652 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732519849283/Put/vlen=1045/seqid=0] 2024-11-25T07:30:54,652 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 2024-11-25T07:30:54,652 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519852622 2024-11-25T07:30:54,652 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519852622 2024-11-25T07:30:54,652 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519852622 after 0ms 2024-11-25T07:30:54,652 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519852622 2024-11-25T07:30:54,655 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732519852622/Put/vlen=1045/seqid=0] 2024-11-25T07:30:54,655 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 2024-11-25T07:30:54,655 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 2024-11-25T07:30:54,656 WARN [IPC Server handler 4 on default port 46285 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 has not been closed. Lease recovery is in progress. 
RecoveryId = 1022 for block blk_1073741839_1021 2024-11-25T07:30:54,656 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 after 1ms 2024-11-25T07:30:54,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:54,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:55,530 WARN [ResponseProcessor for block BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:55,530 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1899189270_22 at /127.0.0.1:46760 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:32883:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46760 dst: /127.0.0.1:32883 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:32883 remote=/127.0.0.1:46760]. Total timeout mills is 60000, 59106 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:30:55,530 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1899189270_22 at /127.0.0.1:48858 [Receiving block BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33397:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48858 dst: /127.0.0.1:33397 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
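The RecoverLeaseFSUtils entries above ("Recover lease on dfs file ...", "Recovered lease, attempt=0 ... after 0ms", "Failed to recover lease, attempt=0 ... after 1ms") are HBase polling the NameNode until the half-open WAL file is closed; the isFileClosed probe is made reflectively, which is why its failures surface as InvocationTargetException. A minimal sketch of that polling loop against the plain HDFS client API, with an illustrative retry budget and back-off rather than the actual RecoverLeaseFSUtils settings:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  /** Polls recoverLease()/isFileClosed() until the NameNode releases the previous writer's lease. */
  static boolean recoverLease(Configuration conf, Path wal) throws Exception {
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on a non-HDFS filesystem
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long start = System.currentTimeMillis();
    for (int attempt = 0; attempt < 5; attempt++) {   // illustrative retry budget
      // recoverLease() returns true once the file is closed and the lease released
      if (dfs.recoverLease(wal)) {
        System.out.printf("Recovered lease, attempt=%d on file=%s after %dms%n",
            attempt, wal, System.currentTimeMillis() - start);
        return true;
      }
      System.out.printf("Failed to recover lease, attempt=%d on file=%s%n", attempt, wal);
      Thread.sleep(4000L);                            // illustrative back-off between attempts
      if (dfs.isFileClosed(wal)) {                    // the probe invoked reflectively in the traces above
        return true;
      }
    }
    return false;
  }
}
```

Once recoverLease() or isFileClosed() reports the file closed, the WAL can be read back safely, which is what the later "Recovered lease, attempt=1 ... after 4002ms" entries record.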
2024-11-25T07:30:55,530 WARN [DataStreamer for file /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 block BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32883,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK], DatanodeInfoWithStorage[127.0.0.1:33397,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32883,DS-aba97906-0443-4941-9a40-b577bdb173f2,DISK]) is bad. 2024-11-25T07:30:55,531 WARN [DataStreamer for file /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 block BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:55,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741839_1022 (size=85) 2024-11-25T07:30:55,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:55,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:56,526 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
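The recurring Close-WAL-Writer-0 warnings that end in "Caused by: java.io.IOException: Filesystem closed" are the usual symptom of Hadoop's FileSystem cache: FileSystem.get() returns one shared instance per URI and user, so once any component closes it, every later call through that handle fails. A minimal sketch of the effect, assuming an HDFS cluster is reachable at the port taken from the entries above:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FilesystemClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:34285"); // port from the WAL paths above (assumed reachable)

    FileSystem a = FileSystem.get(conf); // cached instance
    FileSystem b = FileSystem.get(conf); // cache hit: same object as 'a'

    a.close(); // e.g. a cluster shutdown path closes the shared instance

    // Any later use of the cached handle now fails with
    // java.io.IOException: Filesystem closed (thrown by DFSClient.checkOpen)
    b.exists(new Path("/user/jenkins"));
  }
}
```

That is why the same warning repeats roughly once per second for each of the two files on port 34285: the Close-WAL-Writer-0 retry loop keeps probing through a handle that can never come back.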
2024-11-25T07:30:56,630 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519847272 after 4002ms 2024-11-25T07:30:56,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:56,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:57,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:57,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:30:58,657 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 after 4002ms 2024-11-25T07:30:58,657 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 2024-11-25T07:30:58,661 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 2024-11-25T07:30:58,661 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0e6552fb840445ac7178c07a96e4625c 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-25T07:30:58,661 ERROR [FSHLog-0-hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74-prefix:5eb3d201e8c9,42547,1732519832962 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:58,662 WARN [FSHLog-0-hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74-prefix:5eb3d201e8c9,42547,1732519832962 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T07:30:58,662 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C42547%2C1732519832962:(num 1732519854630) roll requested
2024-11-25T07:30:58,662 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C42547%2C1732519832962.1732519858662
2024-11-25T07:30:58,669 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 newFile=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519858662
2024-11-25T07:30:58,670 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T07:30:58,670 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T07:30:58,670 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T07:30:58,670 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T07:30:58,670 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T07:30:58,670 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519858662
2024-11-25T07:30:58,670 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
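The "roll requested" and "Rolled WAL ... with entries=0, filesize=85 B" entries above are the region server's log roller replacing the damaged writer after the append failure. The same roll can also be requested explicitly through the public Admin API; a minimal sketch, assuming a running cluster reachable via the default HBase configuration, with the ServerName taken from the WAL directory names above:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // host,port,startcode triple as it appears in the WAL directory names above
      ServerName regionServer = ServerName.valueOf("5eb3d201e8c9,42547,1732519832962");
      // Asks the region server to close its current WAL writer and open a new one;
      // the server then logs the same "roll requested" / "Rolled WAL" lines seen here.
      admin.rollWALWriter(regionServer);
    }
  }
}
```

In this test the roll is driven by the server's own logRoller thread after the append failure, so no client call is involved; the sketch only shows the equivalent public entry point.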
2024-11-25T07:30:58,671 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38485:38485),(127.0.0.1/127.0.0.1:45871:45871)] 2024-11-25T07:30:58,671 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 is not closed yet, will try archiving it next time 2024-11-25T07:30:58,671 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-704301945-172.17.0.2-1732519832220:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:58,671 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 2024-11-25T07:30:58,671 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 after 0ms 2024-11-25T07:30:58,672 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 to hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/oldWALs/5eb3d201e8c9%2C42547%2C1732519832962.1732519854630 2024-11-25T07:30:58,687 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/default/TestLogRolling-testLogRollOnPipelineRestart/0e6552fb840445ac7178c07a96e4625c/.tmp/info/0094429bfb28455eac0e6ab30310d467 is 1080, key is row1002/info:/1732519843945/Put/seqid=0 2024-11-25T07:30:58,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741841_1024 (size=9270) 2024-11-25T07:30:58,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741841_1024 (size=9270) 2024-11-25T07:30:58,692 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/default/TestLogRolling-testLogRollOnPipelineRestart/0e6552fb840445ac7178c07a96e4625c/.tmp/info/0094429bfb28455eac0e6ab30310d467 2024-11-25T07:30:58,698 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/default/TestLogRolling-testLogRollOnPipelineRestart/0e6552fb840445ac7178c07a96e4625c/.tmp/info/0094429bfb28455eac0e6ab30310d467 as hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/default/TestLogRolling-testLogRollOnPipelineRestart/0e6552fb840445ac7178c07a96e4625c/info/0094429bfb28455eac0e6ab30310d467 2024-11-25T07:30:58,703 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/default/TestLogRolling-testLogRollOnPipelineRestart/0e6552fb840445ac7178c07a96e4625c/info/0094429bfb28455eac0e6ab30310d467, entries=4, sequenceid=8, filesize=9.1 K 2024-11-25T07:30:58,704 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 0e6552fb840445ac7178c07a96e4625c in 43ms, sequenceid=8, compaction requested=false 2024-11-25T07:30:58,705 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status 
journal for 0e6552fb840445ac7178c07a96e4625c: 2024-11-25T07:30:58,705 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-25T07:30:58,705 ERROR [FSHLog-0-hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74-prefix:5eb3d201e8c9,42547,1732519832962.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:58,705 WARN [FSHLog-0-hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74-prefix:5eb3d201e8c9,42547,1732519832962.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:58,705 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C42547%2C1732519832962.meta:.meta(num 1732519833742) roll requested 2024-11-25T07:30:58,706 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519858705.meta 2024-11-25T07:30:58,710 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:58,710 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:58,710 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:58,710 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:58,710 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:58,710 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519833742.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519858705.meta 2024-11-25T07:30:58,711 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:58,711 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:30:58,711 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519833742.meta 2024-11-25T07:30:58,711 WARN [IPC Server handler 4 on default port 46285 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519833742.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1013 2024-11-25T07:30:58,711 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38485:38485),(127.0.0.1/127.0.0.1:45871:45871)] 2024-11-25T07:30:58,711 DEBUG [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519833742.meta is not closed yet, will try archiving it next time 2024-11-25T07:30:58,712 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519833742.meta after 1ms 2024-11-25T07:30:58,727 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/.tmp/info/36e1a9d814354af0b827ac8f4f7682c0 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c./info:regioninfo/1732519834247/Put/seqid=0 2024-11-25T07:30:58,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741843_1027 (size=7125) 2024-11-25T07:30:58,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741843_1027 (size=7125) 2024-11-25T07:30:58,732 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/.tmp/info/36e1a9d814354af0b827ac8f4f7682c0 2024-11-25T07:30:58,754 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/.tmp/ns/79c94a7fc6cf448fb13a662e3c7ad486 is 43, key is default/ns:d/1732519833782/Put/seqid=0 2024-11-25T07:30:58,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741844_1028 (size=5153) 2024-11-25T07:30:58,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741844_1028 (size=5153) 2024-11-25T07:30:58,760 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/.tmp/ns/79c94a7fc6cf448fb13a662e3c7ad486 2024-11-25T07:30:58,780 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/.tmp/table/830b4b8898e14a74a2d2ff91b2d3ab1c is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732519834257/Put/seqid=0 2024-11-25T07:30:58,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741845_1029 (size=5438) 2024-11-25T07:30:58,785 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741845_1029 (size=5438) 2024-11-25T07:30:58,785 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/.tmp/table/830b4b8898e14a74a2d2ff91b2d3ab1c 2024-11-25T07:30:58,791 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/.tmp/info/36e1a9d814354af0b827ac8f4f7682c0 as hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/info/36e1a9d814354af0b827ac8f4f7682c0 2024-11-25T07:30:58,796 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/info/36e1a9d814354af0b827ac8f4f7682c0, entries=10, sequenceid=11, filesize=7.0 K 2024-11-25T07:30:58,797 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/.tmp/ns/79c94a7fc6cf448fb13a662e3c7ad486 as hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/ns/79c94a7fc6cf448fb13a662e3c7ad486 2024-11-25T07:30:58,802 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/ns/79c94a7fc6cf448fb13a662e3c7ad486, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T07:30:58,803 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/.tmp/table/830b4b8898e14a74a2d2ff91b2d3ab1c as hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/table/830b4b8898e14a74a2d2ff91b2d3ab1c 2024-11-25T07:30:58,808 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/table/830b4b8898e14a74a2d2ff91b2d3ab1c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-25T07:30:58,809 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=11, compaction requested=false 2024-11-25T07:30:58,810 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-25T07:30:58,816 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T07:30:58,816 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T07:30:58,816 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:30:58,816 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:58,816 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:58,817 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-25T07:30:58,817 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T07:30:58,817 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1246414341, stopped=false 2024-11-25T07:30:58,817 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5eb3d201e8c9,43681,1732519832906 2024-11-25T07:30:58,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:30:58,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:30:58,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:58,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:30:58,818 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:30:58,819 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T07:30:58,819 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:30:58,819 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:58,820 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:30:58,820 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5eb3d201e8c9,42547,1732519832962' ***** 2024-11-25T07:30:58,820 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T07:30:58,820 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:30:58,820 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T07:30:58,821 INFO [RS:0;5eb3d201e8c9:42547 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T07:30:58,821 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T07:30:58,821 INFO [RS:0;5eb3d201e8c9:42547 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T07:30:58,821 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(3091): Received CLOSE for 0e6552fb840445ac7178c07a96e4625c 2024-11-25T07:30:58,821 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(959): stopping server 5eb3d201e8c9,42547,1732519832962 2024-11-25T07:30:58,821 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:30:58,821 INFO [RS:0;5eb3d201e8c9:42547 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5eb3d201e8c9:42547. 
2024-11-25T07:30:58,821 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0e6552fb840445ac7178c07a96e4625c, disabling compactions & flushes 2024-11-25T07:30:58,821 DEBUG [RS:0;5eb3d201e8c9:42547 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:30:58,821 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 2024-11-25T07:30:58,822 DEBUG [RS:0;5eb3d201e8c9:42547 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:30:58,822 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 2024-11-25T07:30:58,822 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. after waiting 0 ms 2024-11-25T07:30:58,822 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T07:30:58,822 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T07:30:58,822 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T07:30:58,822 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T07:30:58,822 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T07:30:58,822 DEBUG [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(1325): Online Regions={0e6552fb840445ac7178c07a96e4625c=TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c., 1588230740=hbase:meta,,1.1588230740} 2024-11-25T07:30:58,822 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 2024-11-25T07:30:58,822 DEBUG [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(1351): Waiting on 0e6552fb840445ac7178c07a96e4625c, 1588230740 2024-11-25T07:30:58,822 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:30:58,823 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:30:58,823 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:30:58,823 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:30:58,823 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:30:58,827 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/default/TestLogRolling-testLogRollOnPipelineRestart/0e6552fb840445ac7178c07a96e4625c/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-25T07:30:58,827 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T07:30:58,828 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 
2024-11-25T07:30:58,828 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0e6552fb840445ac7178c07a96e4625c: Waiting for close lock at 1732519858821Running coprocessor pre-close hooks at 1732519858821Disabling compacts and flushes for region at 1732519858821Disabling writes for close at 1732519858822 (+1 ms)Writing region close event to WAL at 1732519858823 (+1 ms)Running coprocessor post-close hooks at 1732519858828 (+5 ms)Closed at 1732519858828 2024-11-25T07:30:58,828 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:30:58,828 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732519833893.0e6552fb840445ac7178c07a96e4625c. 2024-11-25T07:30:58,828 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:30:58,828 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519858822Running coprocessor pre-close hooks at 1732519858822Disabling compacts and flushes for region at 1732519858822Disabling writes for close at 1732519858823 (+1 ms)Writing region close event to WAL at 1732519858824 (+1 ms)Running coprocessor post-close hooks at 1732519858828 (+4 ms)Closed at 1732519858828 2024-11-25T07:30:58,828 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T07:30:58,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:58,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:30:59,023 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(976): stopping server 5eb3d201e8c9,42547,1732519832962; all regions closed. 
2024-11-25T07:30:59,023 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:59,023 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:59,023 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:59,023 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:59,024 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:30:59,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741842_1025 (size=825) 2024-11-25T07:30:59,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741842_1025 (size=825) 2024-11-25T07:30:59,211 INFO [regionserver/5eb3d201e8c9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T07:30:59,211 INFO [regionserver/5eb3d201e8c9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T07:30:59,212 INFO [regionserver/5eb3d201e8c9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:30:59,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:30:59,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:00,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:00,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:01,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:01,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:02,689 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-25T07:31:02,712 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519833742.meta after 4001ms 2024-11-25T07:31:02,713 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/WALs/5eb3d201e8c9,42547,1732519832962/5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519833742.meta to hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/oldWALs/5eb3d201e8c9%2C42547%2C1732519832962.meta.1732519833742.meta 2024-11-25T07:31:02,715 DEBUG [RS:0;5eb3d201e8c9:42547 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/oldWALs 2024-11-25T07:31:02,715 INFO [RS:0;5eb3d201e8c9:42547 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C42547%2C1732519832962.meta:.meta(num 1732519858705) 2024-11-25T07:31:02,716 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,716 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,716 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,716 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,716 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741840_1023 (size=1162) 2024-11-25T07:31:02,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741840_1023 (size=1162) 2024-11-25T07:31:02,722 DEBUG [RS:0;5eb3d201e8c9:42547 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/oldWALs 2024-11-25T07:31:02,722 INFO [RS:0;5eb3d201e8c9:42547 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C42547%2C1732519832962:(num 1732519858662) 2024-11-25T07:31:02,722 DEBUG [RS:0;5eb3d201e8c9:42547 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:31:02,722 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:31:02,723 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:31:02,723 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.ChoreService(370): Chore service for: regionserver/5eb3d201e8c9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T07:31:02,723 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:31:02,723 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T07:31:02,723 INFO [RS:0;5eb3d201e8c9:42547 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42547 2024-11-25T07:31:02,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:31:02,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5eb3d201e8c9,42547,1732519832962 2024-11-25T07:31:02,725 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:31:02,727 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5eb3d201e8c9,42547,1732519832962] 2024-11-25T07:31:02,729 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5eb3d201e8c9,42547,1732519832962 already deleted, retry=false 2024-11-25T07:31:02,729 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5eb3d201e8c9,42547,1732519832962 expired; onlineServers=0 2024-11-25T07:31:02,729 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5eb3d201e8c9,43681,1732519832906' ***** 2024-11-25T07:31:02,729 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T07:31:02,729 INFO [M:0;5eb3d201e8c9:43681 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:31:02,729 INFO [M:0;5eb3d201e8c9:43681 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:31:02,729 DEBUG [M:0;5eb3d201e8c9:43681 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T07:31:02,730 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T07:31:02,730 DEBUG [M:0;5eb3d201e8c9:43681 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T07:31:02,730 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519833137 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519833137,5,FailOnTimeoutGroup] 2024-11-25T07:31:02,730 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519833140 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519833140,5,FailOnTimeoutGroup] 2024-11-25T07:31:02,730 INFO [M:0;5eb3d201e8c9:43681 {}] hbase.ChoreService(370): Chore service for: master/5eb3d201e8c9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T07:31:02,730 INFO [M:0;5eb3d201e8c9:43681 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:31:02,730 DEBUG [M:0;5eb3d201e8c9:43681 {}] master.HMaster(1795): Stopping service threads 2024-11-25T07:31:02,730 INFO [M:0;5eb3d201e8c9:43681 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T07:31:02,730 INFO [M:0;5eb3d201e8c9:43681 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:31:02,730 INFO [M:0;5eb3d201e8c9:43681 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T07:31:02,730 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T07:31:02,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T07:31:02,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:02,731 DEBUG [M:0;5eb3d201e8c9:43681 {}] zookeeper.ZKUtil(347): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T07:31:02,731 WARN [M:0;5eb3d201e8c9:43681 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T07:31:02,732 INFO [M:0;5eb3d201e8c9:43681 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/.lastflushedseqids 2024-11-25T07:31:02,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741846_1030 (size=130) 2024-11-25T07:31:02,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741846_1030 (size=130) 2024-11-25T07:31:02,738 INFO [M:0;5eb3d201e8c9:43681 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T07:31:02,738 INFO [M:0;5eb3d201e8c9:43681 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T07:31:02,738 DEBUG [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:31:02,738 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:31:02,739 DEBUG [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:31:02,739 DEBUG [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:31:02,739 DEBUG [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:31:02,739 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-25T07:31:02,739 ERROR [FSHLog-0-hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData-prefix:5eb3d201e8c9,43681,1732519832906 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:31:02,739 WARN [FSHLog-0-hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData-prefix:5eb3d201e8c9,43681,1732519832906 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T07:31:02,739 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 5eb3d201e8c9%2C43681%2C1732519832906:(num 1732519833053) roll requested 2024-11-25T07:31:02,740 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C43681%2C1732519832906.1732519862740 2024-11-25T07:31:02,745 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,745 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,745 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,745 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,746 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,746 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906/5eb3d201e8c9%2C43681%2C1732519832906.1732519862740 2024-11-25T07:31:02,746 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:31:02,746 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39907,DS-70a82119-7acc-4462-9ebd-5d7d216bf31a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T07:31:02,746 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 2024-11-25T07:31:02,747 WARN [IPC Server handler 4 on default port 46285 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-25T07:31:02,747 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 after 1ms 2024-11-25T07:31:02,750 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45871:45871),(127.0.0.1/127.0.0.1:38485:38485)] 2024-11-25T07:31:02,750 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 is not closed yet, will try archiving it next time 2024-11-25T07:31:02,771 DEBUG [M:0;5eb3d201e8c9:43681 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/273e1989bb1c4d3187076d5e5c376151 is 82, key is hbase:meta,,1/info:regioninfo/1732519833766/Put/seqid=0 2024-11-25T07:31:02,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741848_1033 (size=5672) 2024-11-25T07:31:02,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741848_1033 (size=5672) 2024-11-25T07:31:02,776 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/273e1989bb1c4d3187076d5e5c376151 2024-11-25T07:31:02,795 DEBUG [M:0;5eb3d201e8c9:43681 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f00012d7510488fb9f4391ad1a39089 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732519834261/Put/seqid=0 2024-11-25T07:31:02,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741849_1034 (size=6119) 2024-11-25T07:31:02,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741849_1034 (size=6119) 2024-11-25T07:31:02,801 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f00012d7510488fb9f4391ad1a39089 2024-11-25T07:31:02,820 DEBUG [M:0;5eb3d201e8c9:43681 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/20d8dad98c83424cab0e8ce41d9d40e5 is 69, key is 5eb3d201e8c9,42547,1732519832962/rs:state/1732519833199/Put/seqid=0 2024-11-25T07:31:02,824 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741850_1035 (size=5156) 2024-11-25T07:31:02,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741850_1035 (size=5156) 2024-11-25T07:31:02,825 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/20d8dad98c83424cab0e8ce41d9d40e5 2024-11-25T07:31:02,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:31:02,827 INFO [RS:0;5eb3d201e8c9:42547 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:31:02,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42547-0x1014e0897cb0001, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:31:02,827 INFO [RS:0;5eb3d201e8c9:42547 {}] regionserver.HRegionServer(1031): Exiting; stopping=5eb3d201e8c9,42547,1732519832962; zookeeper connection closed. 2024-11-25T07:31:02,827 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@780156fe {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@780156fe 2024-11-25T07:31:02,828 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T07:31:02,846 DEBUG [M:0;5eb3d201e8c9:43681 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c5b81e3a0c4486fb1c240a25aa4f99c is 52, key is load_balancer_on/state:d/1732519833889/Put/seqid=0 2024-11-25T07:31:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741851_1036 (size=5056) 2024-11-25T07:31:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741851_1036 (size=5056) 2024-11-25T07:31:02,852 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c5b81e3a0c4486fb1c240a25aa4f99c 2024-11-25T07:31:02,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:02,857 DEBUG [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/273e1989bb1c4d3187076d5e5c376151 as hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/273e1989bb1c4d3187076d5e5c376151 2024-11-25T07:31:02,862 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/273e1989bb1c4d3187076d5e5c376151, entries=8, sequenceid=56, filesize=5.5 K 2024-11-25T07:31:02,863 DEBUG [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f00012d7510488fb9f4391ad1a39089 as hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5f00012d7510488fb9f4391ad1a39089 2024-11-25T07:31:02,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:02,869 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5f00012d7510488fb9f4391ad1a39089, entries=6, sequenceid=56, filesize=6.0 K 2024-11-25T07:31:02,870 DEBUG [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/20d8dad98c83424cab0e8ce41d9d40e5 as hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/20d8dad98c83424cab0e8ce41d9d40e5 2024-11-25T07:31:02,875 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/20d8dad98c83424cab0e8ce41d9d40e5, entries=1, sequenceid=56, filesize=5.0 K 2024-11-25T07:31:02,876 DEBUG [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c5b81e3a0c4486fb1c240a25aa4f99c as hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3c5b81e3a0c4486fb1c240a25aa4f99c 2024-11-25T07:31:02,881 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3c5b81e3a0c4486fb1c240a25aa4f99c, entries=1, sequenceid=56, filesize=4.9 K 2024-11-25T07:31:02,882 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=56, compaction requested=false 2024-11-25T07:31:02,883 INFO [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:31:02,884 DEBUG [M:0;5eb3d201e8c9:43681 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519862738Disabling compacts and flushes for region at 1732519862738Disabling writes for close at 1732519862739 (+1 ms)Obtaining lock to block concurrent updates at 1732519862739Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732519862739Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732519862739Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732519862751 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732519862751Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732519862770 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732519862770Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732519862781 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732519862795 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732519862795Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732519862805 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732519862819 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732519862819Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732519862830 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732519862846 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732519862846Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3de05362: reopening flushed file at 1732519862857 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f957899: reopening flushed file at 1732519862862 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@173591de: reopening flushed file at 1732519862869 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@252b8562: reopening flushed file at 1732519862875 (+6 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=56, compaction requested=false at 1732519862882 (+7 ms)Writing region close event to WAL at 1732519862883 (+1 ms)Closed at 1732519862883 2024-11-25T07:31:02,884 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,884 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,884 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,884 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-25T07:31:02,884 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:02,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32883 is added to blk_1073741847_1031 (size=757) 2024-11-25T07:31:02,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33397 is added to blk_1073741847_1031 (size=757) 2024-11-25T07:31:02,889 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T07:31:03,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,829 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:03,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:03,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:04,354 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T07:31:04,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,375 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,375 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,375 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:04,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:04,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:05,689 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-25T07:31:05,761 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:31:05,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T07:31:05,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-25T07:31:05,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T07:31:05,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:05,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:06,748 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 after 4001ms 2024-11-25T07:31:06,748 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/WALs/5eb3d201e8c9,43681,1732519832906/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 to hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/oldWALs/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 2024-11-25T07:31:06,751 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/MasterData/oldWALs/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053 to hdfs://localhost:46285/user/jenkins/test-data/41f091f2-c86e-3ef5-6e11-d1836df4ad74/oldWALs/5eb3d201e8c9%2C43681%2C1732519832906.1732519833053$masterlocalwal$ 2024-11-25T07:31:06,751 INFO [M:0;5eb3d201e8c9:43681 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T07:31:06,751 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T07:31:06,751 INFO [M:0;5eb3d201e8c9:43681 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43681 2024-11-25T07:31:06,751 INFO [M:0;5eb3d201e8c9:43681 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:31:06,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:31:06,853 INFO [M:0;5eb3d201e8c9:43681 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:31:06,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43681-0x1014e0897cb0000, quorum=127.0.0.1:60916, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:31:06,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:06,855 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c3c718b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:31:06,856 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7df90452{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:31:06,856 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:31:06,856 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58c81822{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:31:06,856 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ae70be3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,STOPPED} 2024-11-25T07:31:06,857 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:31:06,857 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T07:31:06,857 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-704301945-172.17.0.2-1732519832220 (Datanode Uuid 716db90c-d81d-4ded-bc47-0f24b99e4835) service to localhost/127.0.0.1:46285 2024-11-25T07:31:06,857 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:31:06,858 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data3/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:31:06,858 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data4/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:31:06,858 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:31:06,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3affdf32{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:31:06,860 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ebbad67{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:31:06,860 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 
2024-11-25T07:31:06,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21404da7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:31:06,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4307cd3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,STOPPED} 2024-11-25T07:31:06,861 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:31:06,861 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T07:31:06,861 WARN [BP-704301945-172.17.0.2-1732519832220 heartbeating to localhost/127.0.0.1:46285 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-704301945-172.17.0.2-1732519832220 (Datanode Uuid b944ae9b-1e3c-4ad6-89ea-1fd1dbc1c46c) service to localhost/127.0.0.1:46285 2024-11-25T07:31:06,861 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:31:06,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data1/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:31:06,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/cluster_3a19c4b0-0fb3-d5c1-0ba1-8cf58f1c8a7a/data/data2/current/BP-704301945-172.17.0.2-1732519832220 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:31:06,862 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:31:06,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@33255ae1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:31:06,868 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b6caabd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:31:06,868 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:31:06,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25f63c50{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:31:06,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@1e3b8424{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir/,STOPPED} 2024-11-25T07:31:06,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:06,874 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T07:31:06,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T07:31:06,899 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46285 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:46285 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46285 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46285 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:46285 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:46285 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46285 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:46285 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 446) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=111 (was 199), ProcessCount=11 (was 11), AvailableMemoryMB=8022 (was 8177) 2024-11-25T07:31:06,906 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=111, ProcessCount=11, AvailableMemoryMB=8022 2024-11-25T07:31:06,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T07:31:06,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.log.dir so I do NOT create it in target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b 2024-11-25T07:31:06,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/68338220-8420-5dff-ceaa-f6d618529025/hadoop.tmp.dir so I do NOT create it in target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b 2024-11-25T07:31:06,906 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436, deleteOnExit=true 2024-11-25T07:31:06,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T07:31:06,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/test.cache.data in system properties and HBase conf 2024-11-25T07:31:06,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/hadoop.log.dir in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T07:31:06,907 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T07:31:06,907 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/nfs.dump.dir in system properties and HBase conf 2024-11-25T07:31:06,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/java.io.tmpdir in system properties and HBase conf 2024-11-25T07:31:06,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:31:06,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T07:31:06,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T07:31:06,921 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:31:06,986 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:31:06,990 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:31:06,991 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:31:06,991 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:31:06,991 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:31:06,992 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:31:06,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5838a3fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:31:06,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b93cee9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:31:07,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1592e8d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/java.io.tmpdir/jetty-localhost-43895-hadoop-hdfs-3_4_1-tests_jar-_-any-5719439365714633989/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:31:07,112 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68744dfe{HTTP/1.1, (http/1.1)}{localhost:43895} 2024-11-25T07:31:07,112 INFO [Time-limited test {}] server.Server(415): Started @186959ms 2024-11-25T07:31:07,124 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:31:07,172 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:31:07,175 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:31:07,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:31:07,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:31:07,176 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:31:07,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c243e85{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:31:07,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ed77c81{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:31:07,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16a85bb6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/java.io.tmpdir/jetty-localhost-40457-hadoop-hdfs-3_4_1-tests_jar-_-any-6361397983095376748/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:31:07,291 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cac9c50{HTTP/1.1, (http/1.1)}{localhost:40457} 2024-11-25T07:31:07,291 INFO [Time-limited test {}] server.Server(415): Started @187138ms 2024-11-25T07:31:07,293 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:31:07,325 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:31:07,328 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:31:07,329 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:31:07,329 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:31:07,329 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:31:07,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e6bebf5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:31:07,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c1ed8d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:31:07,394 WARN [Thread-1640 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436/data/data1/current/BP-1717048765-172.17.0.2-1732519866936/current, will proceed with Du for space computation calculation, 2024-11-25T07:31:07,394 WARN [Thread-1641 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436/data/data2/current/BP-1717048765-172.17.0.2-1732519866936/current, will proceed with Du for space computation calculation, 2024-11-25T07:31:07,409 WARN [Thread-1619 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T07:31:07,412 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x701ee49dfe8da44c with lease ID 0xadc393a987d4ebc3: Processing first storage report for DS-c57f40f1-8880-44fd-963a-d6f305e2bb42 from datanode DatanodeRegistration(127.0.0.1:45489, datanodeUuid=e4ec1b39-39eb-43f0-9aad-5277b2fc31f8, infoPort=33541, infoSecurePort=0, ipcPort=40963, storageInfo=lv=-57;cid=testClusterID;nsid=286617981;c=1732519866936) 2024-11-25T07:31:07,412 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x701ee49dfe8da44c with lease ID 0xadc393a987d4ebc3: from storage DS-c57f40f1-8880-44fd-963a-d6f305e2bb42 node DatanodeRegistration(127.0.0.1:45489, datanodeUuid=e4ec1b39-39eb-43f0-9aad-5277b2fc31f8, infoPort=33541, infoSecurePort=0, ipcPort=40963, storageInfo=lv=-57;cid=testClusterID;nsid=286617981;c=1732519866936), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:31:07,412 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x701ee49dfe8da44c with lease ID 0xadc393a987d4ebc3: Processing first storage report for DS-db17b58b-5db2-4030-b5bf-bfcd9318debd from datanode DatanodeRegistration(127.0.0.1:45489, datanodeUuid=e4ec1b39-39eb-43f0-9aad-5277b2fc31f8, infoPort=33541, infoSecurePort=0, ipcPort=40963, storageInfo=lv=-57;cid=testClusterID;nsid=286617981;c=1732519866936) 2024-11-25T07:31:07,412 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x701ee49dfe8da44c with lease ID 0xadc393a987d4ebc3: from storage DS-db17b58b-5db2-4030-b5bf-bfcd9318debd node DatanodeRegistration(127.0.0.1:45489, datanodeUuid=e4ec1b39-39eb-43f0-9aad-5277b2fc31f8, infoPort=33541, infoSecurePort=0, ipcPort=40963, storageInfo=lv=-57;cid=testClusterID;nsid=286617981;c=1732519866936), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:31:07,446 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@770e751e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/java.io.tmpdir/jetty-localhost-40435-hadoop-hdfs-3_4_1-tests_jar-_-any-640942850310950968/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:31:07,446 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5828d739{HTTP/1.1, (http/1.1)}{localhost:40435} 2024-11-25T07:31:07,446 INFO [Time-limited test {}] server.Server(415): Started @187293ms 2024-11-25T07:31:07,448 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T07:31:07,550 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436/data/data3/current/BP-1717048765-172.17.0.2-1732519866936/current, will proceed with Du for space computation calculation, 2024-11-25T07:31:07,550 WARN [Thread-1667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436/data/data4/current/BP-1717048765-172.17.0.2-1732519866936/current, will proceed with Du for space computation calculation, 2024-11-25T07:31:07,566 WARN [Thread-1655 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T07:31:07,569 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf23bc1e3ff0f8417 with lease ID 0xadc393a987d4ebc4: Processing first storage report for DS-e43006f4-5a9b-40cf-a710-087ac7d01164 from datanode DatanodeRegistration(127.0.0.1:43399, datanodeUuid=3b993c24-538d-4754-b757-0aece92ef136, infoPort=44765, infoSecurePort=0, ipcPort=36057, storageInfo=lv=-57;cid=testClusterID;nsid=286617981;c=1732519866936) 2024-11-25T07:31:07,569 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf23bc1e3ff0f8417 with lease ID 0xadc393a987d4ebc4: from storage DS-e43006f4-5a9b-40cf-a710-087ac7d01164 node DatanodeRegistration(127.0.0.1:43399, datanodeUuid=3b993c24-538d-4754-b757-0aece92ef136, infoPort=44765, infoSecurePort=0, ipcPort=36057, storageInfo=lv=-57;cid=testClusterID;nsid=286617981;c=1732519866936), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:31:07,569 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf23bc1e3ff0f8417 with lease ID 0xadc393a987d4ebc4: Processing first storage report for DS-31dbdf3d-9c1c-412c-85a5-c3862a6ea427 from datanode DatanodeRegistration(127.0.0.1:43399, datanodeUuid=3b993c24-538d-4754-b757-0aece92ef136, infoPort=44765, infoSecurePort=0, ipcPort=36057, storageInfo=lv=-57;cid=testClusterID;nsid=286617981;c=1732519866936) 2024-11-25T07:31:07,569 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf23bc1e3ff0f8417 with lease ID 0xadc393a987d4ebc4: from storage DS-31dbdf3d-9c1c-412c-85a5-c3862a6ea427 node DatanodeRegistration(127.0.0.1:43399, datanodeUuid=3b993c24-538d-4754-b757-0aece92ef136, infoPort=44765, infoSecurePort=0, ipcPort=36057, storageInfo=lv=-57;cid=testClusterID;nsid=286617981;c=1732519866936), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:31:07,670 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b 2024-11-25T07:31:07,672 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436/zookeeper_0, clientPort=60919, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T07:31:07,673 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60919 2024-11-25T07:31:07,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:31:07,674 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:31:07,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:31:07,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:31:07,684 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b with version=8 2024-11-25T07:31:07,684 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/hbase-staging 2024-11-25T07:31:07,686 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:31:07,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:31:07,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:31:07,686 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:31:07,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:31:07,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:31:07,686 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T07:31:07,686 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:31:07,687 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37793 2024-11-25T07:31:07,688 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37793 connecting to ZooKeeper ensemble=127.0.0.1:60919 2024-11-25T07:31:07,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:377930x0, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:31:07,696 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37793-0x1014e091fa70000 connected 2024-11-25T07:31:07,714 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:31:07,715 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:31:07,717 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:31:07,717 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b, hbase.cluster.distributed=false 2024-11-25T07:31:07,718 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:31:07,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37793 2024-11-25T07:31:07,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37793 2024-11-25T07:31:07,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37793 2024-11-25T07:31:07,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37793 2024-11-25T07:31:07,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37793 2024-11-25T07:31:07,735 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:31:07,735 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:31:07,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:31:07,736 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:31:07,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:31:07,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:31:07,736 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T07:31:07,736 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:31:07,737 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34723 2024-11-25T07:31:07,738 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34723 connecting to ZooKeeper ensemble=127.0.0.1:60919 2024-11-25T07:31:07,739 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:31:07,740 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:31:07,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:347230x0, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:31:07,745 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:347230x0, quorum=127.0.0.1:60919, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:31:07,745 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34723-0x1014e091fa70001 connected 2024-11-25T07:31:07,745 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T07:31:07,746 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T07:31:07,746 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T07:31:07,747 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:31:07,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34723 2024-11-25T07:31:07,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34723 2024-11-25T07:31:07,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34723 2024-11-25T07:31:07,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34723 2024-11-25T07:31:07,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34723 2024-11-25T07:31:07,766 
DEBUG [M:0;5eb3d201e8c9:37793 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5eb3d201e8c9:37793 2024-11-25T07:31:07,766 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5eb3d201e8c9,37793,1732519867685 2024-11-25T07:31:07,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:31:07,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:31:07,768 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5eb3d201e8c9,37793,1732519867685 2024-11-25T07:31:07,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T07:31:07,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:07,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:07,771 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T07:31:07,772 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5eb3d201e8c9,37793,1732519867685 from backup master directory 2024-11-25T07:31:07,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5eb3d201e8c9,37793,1732519867685 2024-11-25T07:31:07,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:31:07,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:31:07,773 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-25T07:31:07,773 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5eb3d201e8c9,37793,1732519867685 2024-11-25T07:31:07,777 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/hbase.id] with ID: e0b5d98a-1f78-409f-bb46-6789526de9b2 2024-11-25T07:31:07,777 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/.tmp/hbase.id 2024-11-25T07:31:07,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:31:07,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:31:07,786 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/.tmp/hbase.id]:[hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/hbase.id] 2024-11-25T07:31:07,796 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:31:07,796 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T07:31:07,797 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-25T07:31:07,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:07,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:07,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:31:07,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:31:07,806 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:31:07,806 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T07:31:07,806 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:31:07,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:31:07,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:31:07,814 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store 2024-11-25T07:31:07,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:31:07,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:31:07,821 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:31:07,821 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:31:07,821 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:31:07,821 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:31:07,821 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:31:07,821 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:31:07,821 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T07:31:07,821 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519867821Disabling compacts and flushes for region at 1732519867821Disabling writes for close at 1732519867821Writing region close event to WAL at 1732519867821Closed at 1732519867821 2024-11-25T07:31:07,822 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/.initializing 2024-11-25T07:31:07,822 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/WALs/5eb3d201e8c9,37793,1732519867685 2024-11-25T07:31:07,824 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C37793%2C1732519867685, suffix=, logDir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/WALs/5eb3d201e8c9,37793,1732519867685, archiveDir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/oldWALs, maxLogs=10 2024-11-25T07:31:07,825 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C37793%2C1732519867685.1732519867824 2024-11-25T07:31:07,829 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/WALs/5eb3d201e8c9,37793,1732519867685/5eb3d201e8c9%2C37793%2C1732519867685.1732519867824 2024-11-25T07:31:07,833 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44765:44765),(127.0.0.1/127.0.0.1:33541:33541)] 2024-11-25T07:31:07,833 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:31:07,834 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:31:07,834 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,834 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,835 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,836 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T07:31:07,836 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:07,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:31:07,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,838 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T07:31:07,838 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:07,838 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:31:07,838 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T07:31:07,839 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:07,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:31:07,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,841 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T07:31:07,841 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:07,841 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:31:07,841 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,842 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,842 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,844 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,844 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,845 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T07:31:07,846 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:31:07,848 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:31:07,848 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=723291, jitterRate=-0.080288827419281}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T07:31:07,849 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732519867834Initializing all the Stores at 1732519867835 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519867835Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519867835Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519867835Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519867835Cleaning up temporary data from old regions at 1732519867844 (+9 ms)Region opened successfully at 1732519867849 (+5 ms) 2024-11-25T07:31:07,849 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T07:31:07,852 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@718f9fe1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:31:07,853 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T07:31:07,853 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T07:31:07,853 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T07:31:07,853 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T07:31:07,854 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T07:31:07,854 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T07:31:07,854 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T07:31:07,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:07,857 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T07:31:07,858 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T07:31:07,859 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T07:31:07,859 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T07:31:07,860 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T07:31:07,862 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T07:31:07,863 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T07:31:07,864 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T07:31:07,865 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T07:31:07,866 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T07:31:07,867 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T07:31:07,868 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T07:31:07,869 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T07:31:07,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:07,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:31:07,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:31:07,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:07,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:07,873 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5eb3d201e8c9,37793,1732519867685, sessionid=0x1014e091fa70000, setting cluster-up flag (Was=false) 2024-11-25T07:31:07,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:07,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:07,879 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing 
all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T07:31:07,880 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,37793,1732519867685 2024-11-25T07:31:07,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:07,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:07,888 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T07:31:07,889 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,37793,1732519867685 2024-11-25T07:31:07,890 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T07:31:07,892 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T07:31:07,892 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T07:31:07,892 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-25T07:31:07,892 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5eb3d201e8c9,37793,1732519867685 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T07:31:07,894 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:31:07,894 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:31:07,894 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:31:07,894 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:31:07,894 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5eb3d201e8c9:0, corePoolSize=10, maxPoolSize=10 2024-11-25T07:31:07,894 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,894 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:31:07,894 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,896 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732519897895 2024-11-25T07:31:07,896 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T07:31:07,896 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T07:31:07,896 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T07:31:07,896 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T07:31:07,896 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T07:31:07,896 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T07:31:07,896 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:07,896 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:31:07,896 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T07:31:07,896 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T07:31:07,896 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T07:31:07,897 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T07:31:07,897 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T07:31:07,897 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T07:31:07,897 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519867897,5,FailOnTimeoutGroup] 2024-11-25T07:31:07,897 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:07,897 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T07:31:07,900 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519867897,5,FailOnTimeoutGroup] 2024-11-25T07:31:07,900 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:07,900 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T07:31:07,900 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:07,900 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:07,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:31:07,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:31:07,956 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(746): ClusterId : e0b5d98a-1f78-409f-bb46-6789526de9b2 2024-11-25T07:31:07,956 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T07:31:07,958 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T07:31:07,958 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T07:31:07,961 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T07:31:07,961 DEBUG [RS:0;5eb3d201e8c9:34723 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@287610c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:31:07,973 DEBUG [RS:0;5eb3d201e8c9:34723 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5eb3d201e8c9:34723 2024-11-25T07:31:07,973 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T07:31:07,973 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T07:31:07,973 DEBUG [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T07:31:07,974 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(2659): reportForDuty to master=5eb3d201e8c9,37793,1732519867685 with port=34723, startcode=1732519867735 2024-11-25T07:31:07,974 DEBUG [RS:0;5eb3d201e8c9:34723 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T07:31:07,976 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56491, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T07:31:07,976 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37793 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:07,976 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37793 {}] master.ServerManager(517): Registering regionserver=5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:07,978 DEBUG [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b 2024-11-25T07:31:07,978 DEBUG [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39459 2024-11-25T07:31:07,978 DEBUG [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T07:31:07,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:31:07,980 DEBUG [RS:0;5eb3d201e8c9:34723 {}] zookeeper.ZKUtil(111): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:07,980 WARN [RS:0;5eb3d201e8c9:34723 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T07:31:07,980 INFO [RS:0;5eb3d201e8c9:34723 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:31:07,980 DEBUG [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:07,981 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5eb3d201e8c9,34723,1732519867735] 2024-11-25T07:31:07,984 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T07:31:07,985 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T07:31:07,985 INFO [RS:0;5eb3d201e8c9:34723 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T07:31:07,985 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T07:31:07,985 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T07:31:07,986 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T07:31:07,986 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:07,986 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,986 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,986 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,986 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,986 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,986 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:31:07,986 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,986 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,986 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,987 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,987 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,987 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:31:07,987 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:31:07,987 DEBUG [RS:0;5eb3d201e8c9:34723 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:31:07,987 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T07:31:07,987 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:07,987 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:07,987 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:07,987 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:07,987 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,34723,1732519867735-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:31:08,002 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T07:31:08,002 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,34723,1732519867735-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:08,003 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:08,003 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.Replication(171): 5eb3d201e8c9,34723,1732519867735 started 2024-11-25T07:31:08,016 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:08,016 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(1482): Serving as 5eb3d201e8c9,34723,1732519867735, RpcServer on 5eb3d201e8c9/172.17.0.2:34723, sessionid=0x1014e091fa70001 2024-11-25T07:31:08,016 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T07:31:08,016 DEBUG [RS:0;5eb3d201e8c9:34723 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:08,016 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,34723,1732519867735' 2024-11-25T07:31:08,016 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T07:31:08,017 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T07:31:08,017 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T07:31:08,017 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T07:31:08,017 DEBUG [RS:0;5eb3d201e8c9:34723 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:08,017 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,34723,1732519867735' 2024-11-25T07:31:08,017 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T07:31:08,018 DEBUG 
[RS:0;5eb3d201e8c9:34723 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T07:31:08,018 DEBUG [RS:0;5eb3d201e8c9:34723 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T07:31:08,018 INFO [RS:0;5eb3d201e8c9:34723 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T07:31:08,018 INFO [RS:0;5eb3d201e8c9:34723 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T07:31:08,120 INFO [RS:0;5eb3d201e8c9:34723 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C34723%2C1732519867735, suffix=, logDir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735, archiveDir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/oldWALs, maxLogs=32 2024-11-25T07:31:08,120 INFO [RS:0;5eb3d201e8c9:34723 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C34723%2C1732519867735.1732519868120 2024-11-25T07:31:08,126 INFO [RS:0;5eb3d201e8c9:34723 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519868120 2024-11-25T07:31:08,130 DEBUG [RS:0;5eb3d201e8c9:34723 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33541:33541),(127.0.0.1/127.0.0.1:44765:44765)] 2024-11-25T07:31:08,306 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T07:31:08,307 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b 2024-11-25T07:31:08,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741833_1009 (size=32) 2024-11-25T07:31:08,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741833_1009 (size=32) 2024-11-25T07:31:08,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:31:08,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:31:08,316 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:31:08,316 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:08,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:31:08,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:31:08,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:31:08,318 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:08,318 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:31:08,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:31:08,320 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:31:08,320 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:08,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:31:08,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:31:08,321 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:31:08,321 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:08,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:31:08,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:31:08,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740 2024-11-25T07:31:08,323 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740 2024-11-25T07:31:08,324 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:31:08,324 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:31:08,324 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T07:31:08,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:31:08,327 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:31:08,328 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762552, jitterRate=-0.030365288257598877}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:31:08,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732519868314Initializing all the Stores at 1732519868314Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519868314Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519868315 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519868315Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519868315Cleaning up temporary data from old regions at 1732519868324 (+9 ms)Region opened successfully at 1732519868328 (+4 ms) 2024-11-25T07:31:08,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:31:08,328 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-11-25T07:31:08,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:31:08,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:31:08,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:31:08,329 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:31:08,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519868328Disabling compacts and flushes for region at 1732519868328Disabling writes for close at 1732519868328Writing region close event to WAL at 1732519868329 (+1 ms)Closed at 1732519868329 2024-11-25T07:31:08,330 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:31:08,330 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T07:31:08,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T07:31:08,332 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:31:08,333 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T07:31:08,483 DEBUG [5eb3d201e8c9:37793 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T07:31:08,484 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:08,485 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,34723,1732519867735, state=OPENING 2024-11-25T07:31:08,486 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T07:31:08,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:08,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:08,488 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:31:08,488 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:31:08,488 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:31:08,488 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,34723,1732519867735}] 2024-11-25T07:31:08,641 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T07:31:08,643 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43199, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T07:31:08,647 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T07:31:08,647 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:31:08,648 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C34723%2C1732519867735.meta, suffix=.meta, logDir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735, archiveDir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/oldWALs, maxLogs=32 2024-11-25T07:31:08,649 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C34723%2C1732519867735.meta.1732519868649.meta 2024-11-25T07:31:08,653 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.meta.1732519868649.meta 2024-11-25T07:31:08,654 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44765:44765),(127.0.0.1/127.0.0.1:33541:33541)] 2024-11-25T07:31:08,655 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:31:08,655 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T07:31:08,655 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T07:31:08,656 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
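Editor's aside: the wal.AbstractFSWAL(613) records above report blocksize=256 MB and rollsize=128 MB for both the regionserver WAL and the meta WAL. As a hedged sketch only (assuming the standard hbase.regionserver.hlog.blocksize and hbase.regionserver.logroll.multiplier keys; the defaults written into the code simply mirror the values logged above), the roll size is the block size scaled by the roll multiplier:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSizeSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys; 256 MB block size * 0.5 multiplier gives the 128 MB roll size in the log.
    long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    long rollSize = (long) (blockSize * multiplier);
    System.out.println("roll the WAL once it reaches ~" + rollSize + " bytes");
  }
}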
2024-11-25T07:31:08,656 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T07:31:08,656 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:31:08,656 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T07:31:08,656 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T07:31:08,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:31:08,658 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:31:08,658 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:08,658 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:31:08,658 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:31:08,659 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:31:08,659 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:08,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:31:08,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:31:08,660 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:31:08,660 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:08,660 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:31:08,660 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:31:08,661 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:31:08,661 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:08,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
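Editor's aside: the CompactionConfiguration(183) records above repeat the same tuning figures for every column family of hbase:meta (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms). As an illustration under the assumption that these come from the standard HBase compaction keys (the defaults in the comments just echo the logged values), they can be read back like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed standard keys; values correspond to the CompactionConfiguration log line above.
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);            // minFilesToCompact:3
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);           // maxFilesToCompact:10
    float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);      // ratio 1.200000
    float offPeak = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio 5.0
    long majorPeriod = conf.getLong("hbase.hregion.majorcompaction", 604800000L); // major period (ms)
    float jitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter
    System.out.printf("files [%d,%d) ratio %.2f off-peak %.2f major %d ms jitter %.2f%n",
        minFiles, maxFiles, ratio, offPeak, majorPeriod, jitter);
  }
}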
2024-11-25T07:31:08,661 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:31:08,662 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740 2024-11-25T07:31:08,663 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740 2024-11-25T07:31:08,664 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:31:08,664 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:31:08,665 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T07:31:08,666 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:31:08,667 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=699506, jitterRate=-0.11053287982940674}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:31:08,667 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T07:31:08,668 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732519868656Writing region info on filesystem at 1732519868656Initializing all the Stores at 1732519868657 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519868657Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519868657Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519868657Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519868657Cleaning up temporary data from old regions at 1732519868664 (+7 ms)Running coprocessor post-open hooks at 1732519868667 (+3 ms)Region opened successfully at 1732519868668 (+1 ms) 2024-11-25T07:31:08,669 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732519868641 2024-11-25T07:31:08,671 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T07:31:08,671 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T07:31:08,672 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:08,673 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,34723,1732519867735, state=OPEN 2024-11-25T07:31:08,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:31:08,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:31:08,678 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:08,678 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:31:08,678 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:31:08,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T07:31:08,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,34723,1732519867735 in 190 msec 2024-11-25T07:31:08,683 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T07:31:08,683 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 350 msec 2024-11-25T07:31:08,684 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:31:08,684 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T07:31:08,685 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:31:08,685 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,34723,1732519867735, seqNum=-1] 2024-11-25T07:31:08,685 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:31:08,687 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51127, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:31:08,692 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 800 msec 2024-11-25T07:31:08,692 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732519868692, completionTime=-1 2024-11-25T07:31:08,692 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T07:31:08,692 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T07:31:08,694 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T07:31:08,694 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732519928694 2024-11-25T07:31:08,694 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732519988694 2024-11-25T07:31:08,694 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-25T07:31:08,694 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37793,1732519867685-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:08,694 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37793,1732519867685-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:08,694 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37793,1732519867685-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:08,694 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5eb3d201e8c9:37793, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T07:31:08,694 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:08,695 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:08,696 DEBUG [master/5eb3d201e8c9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T07:31:08,698 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.924sec 2024-11-25T07:31:08,698 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T07:31:08,698 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T07:31:08,698 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T07:31:08,698 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T07:31:08,698 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T07:31:08,698 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37793,1732519867685-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:31:08,698 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37793,1732519867685-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T07:31:08,701 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T07:31:08,701 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T07:31:08,701 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37793,1732519867685-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
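Editor's aside: each ChoreService(168) record above names a task plus a period and time unit. Purely as an illustration of those period/unit semantics, using plain JDK scheduling rather than the HBase ChoreService API, an equivalent fixed-rate schedule looks like the sketch below:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreLikeScheduling {
  public static void main(String[] args) {
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    // Mirrors "name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS":
    // the task re-runs every 1000 ms after an initial delay of 0 ms.
    pool.scheduleAtFixedRate(
        () -> System.out.println("chore fired"), // stand-in for the chore body
        0, 1000, TimeUnit.MILLISECONDS);
    // A daily chore such as MobFileCleanerChore (period=86400, unit=SECONDS)
    // only differs in its period/unit pair.
    pool.scheduleAtFixedRate(() -> {}, 0, 86400, TimeUnit.SECONDS);
    // The pool keeps running until shutdown() is called, much like a long-lived chore service.
  }
}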
2024-11-25T07:31:08,756 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@275d5952, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:31:08,756 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5eb3d201e8c9,37793,-1 for getting cluster id 2024-11-25T07:31:08,757 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T07:31:08,758 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e0b5d98a-1f78-409f-bb46-6789526de9b2' 2024-11-25T07:31:08,759 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T07:31:08,759 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e0b5d98a-1f78-409f-bb46-6789526de9b2" 2024-11-25T07:31:08,759 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55f18070, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:31:08,759 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5eb3d201e8c9,37793,-1] 2024-11-25T07:31:08,759 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T07:31:08,760 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:31:08,761 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53564, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T07:31:08,762 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35909cb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:31:08,762 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:31:08,763 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,34723,1732519867735, seqNum=-1] 2024-11-25T07:31:08,763 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:31:08,764 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39528, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:31:08,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5eb3d201e8c9,37793,1732519867685 2024-11-25T07:31:08,766 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:31:08,769 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T07:31:08,769 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T07:31:08,770 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 5eb3d201e8c9,37793,1732519867685 2024-11-25T07:31:08,770 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2f022144 2024-11-25T07:31:08,770 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T07:31:08,771 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53578, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T07:31:08,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T07:31:08,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-25T07:31:08,771 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:31:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T07:31:08,774 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T07:31:08,774 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:08,774 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-25T07:31:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T07:31:08,775 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T07:31:08,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741835_1011 (size=405) 2024-11-25T07:31:08,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741835_1011 (size=405) 2024-11-25T07:31:08,783 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => aee4071b6bb6842737b95c6ae9831dd6, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b 2024-11-25T07:31:08,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741836_1012 (size=88) 2024-11-25T07:31:08,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741836_1012 (size=88) 2024-11-25T07:31:08,789 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:31:08,789 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing aee4071b6bb6842737b95c6ae9831dd6, disabling compactions & flushes 2024-11-25T07:31:08,789 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:08,789 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:08,789 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. after waiting 0 ms 2024-11-25T07:31:08,789 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 
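Editor's aside: the master records above show a client creating 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family (VERSIONS => '1'). For orientation only, a roughly equivalent client-side request through the Admin API is sketched below; this is an assumption about how such a request could be issued, not the utility code the test in this log actually uses:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Single 'info' family keeping one version, matching the descriptor in the log line above.
      admin.createTable(
          TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
              .setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                      .setMaxVersions(1)
                      .build())
              .build());
    }
  }
}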
2024-11-25T07:31:08,789 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:08,789 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for aee4071b6bb6842737b95c6ae9831dd6: Waiting for close lock at 1732519868789Disabling compacts and flushes for region at 1732519868789Disabling writes for close at 1732519868789Writing region close event to WAL at 1732519868789Closed at 1732519868789 2024-11-25T07:31:08,790 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T07:31:08,790 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732519868790"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732519868790"}]},"ts":"1732519868790"} 2024-11-25T07:31:08,792 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-25T07:31:08,793 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T07:31:08,793 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732519868793"}]},"ts":"1732519868793"} 2024-11-25T07:31:08,795 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-25T07:31:08,796 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aee4071b6bb6842737b95c6ae9831dd6, ASSIGN}] 2024-11-25T07:31:08,797 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aee4071b6bb6842737b95c6ae9831dd6, ASSIGN 2024-11-25T07:31:08,798 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aee4071b6bb6842737b95c6ae9831dd6, ASSIGN; state=OFFLINE, location=5eb3d201e8c9,34723,1732519867735; forceNewPlan=false, retain=false 2024-11-25T07:31:08,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: 
null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-25T07:31:08,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:08,948 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aee4071b6bb6842737b95c6ae9831dd6, regionState=OPENING, regionLocation=5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:08,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aee4071b6bb6842737b95c6ae9831dd6, ASSIGN because future has completed 2024-11-25T07:31:08,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aee4071b6bb6842737b95c6ae9831dd6, server=5eb3d201e8c9,34723,1732519867735}] 2024-11-25T07:31:09,107 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 
2024-11-25T07:31:09,108 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => aee4071b6bb6842737b95c6ae9831dd6, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:31:09,108 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:09,108 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:31:09,108 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:09,108 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:09,109 INFO [StoreOpener-aee4071b6bb6842737b95c6ae9831dd6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:09,111 INFO [StoreOpener-aee4071b6bb6842737b95c6ae9831dd6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aee4071b6bb6842737b95c6ae9831dd6 columnFamilyName info 2024-11-25T07:31:09,111 DEBUG [StoreOpener-aee4071b6bb6842737b95c6ae9831dd6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:31:09,111 INFO [StoreOpener-aee4071b6bb6842737b95c6ae9831dd6-1 {}] regionserver.HStore(327): Store=aee4071b6bb6842737b95c6ae9831dd6/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:31:09,111 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:09,112 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:09,112 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:09,113 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:09,113 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:09,114 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:09,116 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:31:09,116 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened aee4071b6bb6842737b95c6ae9831dd6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=881963, jitterRate=0.1214742511510849}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T07:31:09,117 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:09,117 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for aee4071b6bb6842737b95c6ae9831dd6: Running coprocessor pre-open hook at 1732519869108Writing region info on filesystem at 1732519869108Initializing all the Stores at 1732519869109 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519869109Cleaning up temporary data from old regions at 1732519869113 (+4 ms)Running coprocessor post-open hooks at 1732519869117 (+4 ms)Region opened successfully at 1732519869117 2024-11-25T07:31:09,118 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6., pid=6, masterSystemTime=1732519869104 2024-11-25T07:31:09,121 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:09,121 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:09,122 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aee4071b6bb6842737b95c6ae9831dd6, regionState=OPEN, openSeqNum=2, regionLocation=5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:09,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aee4071b6bb6842737b95c6ae9831dd6, server=5eb3d201e8c9,34723,1732519867735 because future has completed 2024-11-25T07:31:09,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T07:31:09,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure aee4071b6bb6842737b95c6ae9831dd6, server=5eb3d201e8c9,34723,1732519867735 in 175 msec 2024-11-25T07:31:09,131 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T07:31:09,131 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=aee4071b6bb6842737b95c6ae9831dd6, ASSIGN in 334 msec 2024-11-25T07:31:09,132 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T07:31:09,133 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732519869132"}]},"ts":"1732519869132"} 2024-11-25T07:31:09,135 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-25T07:31:09,136 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T07:31:09,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 365 msec 2024-11-25T07:31:09,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:09,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:10,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:10,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:11,264 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T07:31:11,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:31:11,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:11,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:12,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:12,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:13,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:13,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:13,984 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T07:31:13,984 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-25T07:31:14,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:14,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:15,761 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T07:31:15,762 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T07:31:15,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:31:15,762 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T07:31:15,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-25T07:31:15,762 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-25T07:31:15,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T07:31:15,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-25T07:31:15,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:15,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:16,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:16,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:17,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:17,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:18,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T07:31:18,797 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T07:31:18,798 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-25T07:31:18,801 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T07:31:18,801 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 
2024-11-25T07:31:18,803 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6., hostname=5eb3d201e8c9,34723,1732519867735, seqNum=2] 2024-11-25T07:31:18,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T07:31:18,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T07:31:18,817 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-25T07:31:18,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T07:31:18,819 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T07:31:18,820 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T07:31:18,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:18,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:18,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34723 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-25T07:31:18,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:18,981 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing aee4071b6bb6842737b95c6ae9831dd6 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T07:31:18,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/af47c96a295441ef981a4ea7e178792a is 1080, key is row0001/info:/1732519878805/Put/seqid=0 2024-11-25T07:31:19,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741837_1013 (size=6033) 2024-11-25T07:31:19,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741837_1013 (size=6033) 2024-11-25T07:31:19,003 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/af47c96a295441ef981a4ea7e178792a 2024-11-25T07:31:19,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/af47c96a295441ef981a4ea7e178792a as hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/af47c96a295441ef981a4ea7e178792a 2024-11-25T07:31:19,014 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/af47c96a295441ef981a4ea7e178792a, entries=1, sequenceid=5, filesize=5.9 K 2024-11-25T07:31:19,015 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aee4071b6bb6842737b95c6ae9831dd6 in 34ms, sequenceid=5, compaction requested=false 2024-11-25T07:31:19,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush 
status journal for aee4071b6bb6842737b95c6ae9831dd6: 2024-11-25T07:31:19,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:19,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-25T07:31:19,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-25T07:31:19,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-25T07:31:19,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-11-25T07:31:19,025 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 211 msec 2024-11-25T07:31:19,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:19,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:20,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:20,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:21,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:21,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:22,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:22,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:23,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:23,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:24,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:24,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:25,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:25,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:26,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:26,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:27,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:27,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:28,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T07:31:28,838 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T07:31:28,841 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T07:31:28,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T07:31:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-25T07:31:28,843 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-25T07:31:28,844 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T07:31:28,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, 
hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T07:31:28,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:28,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:28,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34723 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-25T07:31:28,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:28,998 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing aee4071b6bb6842737b95c6ae9831dd6 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T07:31:29,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/22f3fd36b4694d29be6d9fc27e771a47 is 1080, key is row0002/info:/1732519888839/Put/seqid=0 2024-11-25T07:31:29,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741838_1014 (size=6033) 2024-11-25T07:31:29,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741838_1014 (size=6033) 2024-11-25T07:31:29,008 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/22f3fd36b4694d29be6d9fc27e771a47 2024-11-25T07:31:29,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/22f3fd36b4694d29be6d9fc27e771a47 as 
hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/22f3fd36b4694d29be6d9fc27e771a47 2024-11-25T07:31:29,020 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/22f3fd36b4694d29be6d9fc27e771a47, entries=1, sequenceid=9, filesize=5.9 K 2024-11-25T07:31:29,021 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aee4071b6bb6842737b95c6ae9831dd6 in 23ms, sequenceid=9, compaction requested=false 2024-11-25T07:31:29,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for aee4071b6bb6842737b95c6ae9831dd6: 2024-11-25T07:31:29,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:29,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-25T07:31:29,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-25T07:31:29,025 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-25T07:31:29,026 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-11-25T07:31:29,028 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-11-25T07:31:29,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:29,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:30,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:30,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:31,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:31,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:32,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:32,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 after 68045ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:31:32,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:32,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta after 68032ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T07:31:33,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:33,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:34,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:34,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:35,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:35,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:36,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:36,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:37,669 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T07:31:37,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:37,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:38,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:38,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:38,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-25T07:31:38,917 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T07:31:38,920 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C34723%2C1732519867735.1732519898920 2024-11-25T07:31:38,925 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:38,925 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:38,925 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:38,925 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:38,925 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:38,925 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519868120 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519898920 2024-11-25T07:31:38,926 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33541:33541),(127.0.0.1/127.0.0.1:44765:44765)] 2024-11-25T07:31:38,926 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519868120 is not closed yet, will try archiving it next time 2024-11-25T07:31:38,927 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T07:31:38,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741832_1008 (size=5546) 2024-11-25T07:31:38,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741832_1008 (size=5546) 2024-11-25T07:31:38,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T07:31:38,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-25T07:31:38,929 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-25T07:31:38,930 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-25T07:31:38,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T07:31:39,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34723 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-25T07:31:39,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:39,083 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing aee4071b6bb6842737b95c6ae9831dd6 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T07:31:39,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/2edf2cf7cc914bbca6c06ee815f7cbbc is 1080, key is row0003/info:/1732519898918/Put/seqid=0 2024-11-25T07:31:39,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741840_1016 (size=6033) 2024-11-25T07:31:39,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741840_1016 (size=6033) 2024-11-25T07:31:39,094 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/2edf2cf7cc914bbca6c06ee815f7cbbc 2024-11-25T07:31:39,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/2edf2cf7cc914bbca6c06ee815f7cbbc as hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/2edf2cf7cc914bbca6c06ee815f7cbbc 2024-11-25T07:31:39,105 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/2edf2cf7cc914bbca6c06ee815f7cbbc, entries=1, sequenceid=13, filesize=5.9 K 2024-11-25T07:31:39,106 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
aee4071b6bb6842737b95c6ae9831dd6 in 23ms, sequenceid=13, compaction requested=true 2024-11-25T07:31:39,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for aee4071b6bb6842737b95c6ae9831dd6: 2024-11-25T07:31:39,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:39,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-25T07:31:39,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-25T07:31:39,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-25T07:31:39,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-11-25T07:31:39,114 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-11-25T07:31:39,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:39,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:40,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:40,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:41,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:41,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:42,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:42,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:43,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:43,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:44,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:44,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:45,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:45,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:46,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:46,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:47,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:47,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:48,703 INFO [master/5eb3d201e8c9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T07:31:48,703 INFO [master/5eb3d201e8c9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-25T07:31:48,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:48,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:48,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-25T07:31:48,958 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T07:31:48,958 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T07:31:48,959 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T07:31:48,960 DEBUG [Time-limited test {}] regionserver.HStore(1541): aee4071b6bb6842737b95c6ae9831dd6/info is initiating minor compaction (all files) 2024-11-25T07:31:48,960 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T07:31:48,960 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:31:48,960 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of aee4071b6bb6842737b95c6ae9831dd6/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:48,960 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/af47c96a295441ef981a4ea7e178792a, hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/22f3fd36b4694d29be6d9fc27e771a47, hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/2edf2cf7cc914bbca6c06ee815f7cbbc] into tmpdir=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp, totalSize=17.7 K 2024-11-25T07:31:48,960 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting af47c96a295441ef981a4ea7e178792a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732519878805 2024-11-25T07:31:48,961 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 22f3fd36b4694d29be6d9fc27e771a47, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732519888839 2024-11-25T07:31:48,961 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 2edf2cf7cc914bbca6c06ee815f7cbbc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732519898918 2024-11-25T07:31:48,972 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): aee4071b6bb6842737b95c6ae9831dd6#info#compaction#45 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:31:48,973 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/2463063a40494bf086fa76ac5392ef39 is 1080, key is row0001/info:/1732519878805/Put/seqid=0 2024-11-25T07:31:48,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741841_1017 (size=8296) 2024-11-25T07:31:48,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741841_1017 (size=8296) 2024-11-25T07:31:48,984 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/2463063a40494bf086fa76ac5392ef39 as hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/2463063a40494bf086fa76ac5392ef39 2024-11-25T07:31:48,990 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aee4071b6bb6842737b95c6ae9831dd6/info of aee4071b6bb6842737b95c6ae9831dd6 into 2463063a40494bf086fa76ac5392ef39(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T07:31:48,990 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for aee4071b6bb6842737b95c6ae9831dd6: 2024-11-25T07:31:48,993 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C34723%2C1732519867735.1732519908993 2024-11-25T07:31:49,001 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:49,001 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:49,001 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:49,002 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:49,002 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:49,002 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519898920 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519908993 2024-11-25T07:31:49,003 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44765:44765),(127.0.0.1/127.0.0.1:33541:33541)] 2024-11-25T07:31:49,003 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519898920 is not closed yet, will try archiving it next time 2024-11-25T07:31:49,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741839_1015 (size=2520) 2024-11-25T07:31:49,003 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741839_1015 (size=2520) 2024-11-25T07:31:49,003 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519868120 to hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/oldWALs/5eb3d201e8c9%2C34723%2C1732519867735.1732519868120 2024-11-25T07:31:49,004 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T07:31:49,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T07:31:49,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-25T07:31:49,007 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-25T07:31:49,007 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T07:31:49,008 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T07:31:49,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34723 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-25T07:31:49,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 
2024-11-25T07:31:49,161 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing aee4071b6bb6842737b95c6ae9831dd6 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T07:31:49,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/203a9ebb5e2b4a7895130c21c7f4d82e is 1080, key is row0000/info:/1732519908992/Put/seqid=0 2024-11-25T07:31:49,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741843_1019 (size=6033) 2024-11-25T07:31:49,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741843_1019 (size=6033) 2024-11-25T07:31:49,170 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/203a9ebb5e2b4a7895130c21c7f4d82e 2024-11-25T07:31:49,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/203a9ebb5e2b4a7895130c21c7f4d82e as hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/203a9ebb5e2b4a7895130c21c7f4d82e 2024-11-25T07:31:49,180 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/203a9ebb5e2b4a7895130c21c7f4d82e, entries=1, sequenceid=18, filesize=5.9 K 2024-11-25T07:31:49,181 INFO [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aee4071b6bb6842737b95c6ae9831dd6 in 20ms, sequenceid=18, compaction requested=false 2024-11-25T07:31:49,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for aee4071b6bb6842737b95c6ae9831dd6: 2024-11-25T07:31:49,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 
2024-11-25T07:31:49,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-25T07:31:49,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-25T07:31:49,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-25T07:31:49,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 175 msec 2024-11-25T07:31:49,188 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-11-25T07:31:49,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:49,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:50,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:50,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:51,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:51,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:52,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:52,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:53,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:53,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:54,108 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region aee4071b6bb6842737b95c6ae9831dd6, had cached 0 bytes from a total of 14329 2024-11-25T07:31:54,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:54,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:55,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:55,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:56,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:56,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:57,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:57,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:58,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:58,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:59,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-25T07:31:59,017 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T07:31:59,020 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C34723%2C1732519867735.1732519919020 2024-11-25T07:31:59,026 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,027 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,027 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,027 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,027 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,027 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519908993 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519919020 2024-11-25T07:31:59,028 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33541:33541),(127.0.0.1/127.0.0.1:44765:44765)] 2024-11-25T07:31:59,028 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519908993 is not closed yet, will try archiving it next time 2024-11-25T07:31:59,028 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/WALs/5eb3d201e8c9,34723,1732519867735/5eb3d201e8c9%2C34723%2C1732519867735.1732519898920 to hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/oldWALs/5eb3d201e8c9%2C34723%2C1732519867735.1732519898920 2024-11-25T07:31:59,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T07:31:59,028 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T07:31:59,028 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:31:59,028 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:31:59,029 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:31:59,029 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-25T07:31:59,029 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T07:31:59,029 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1096858046, stopped=false 2024-11-25T07:31:59,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741842_1018 (size=2026) 2024-11-25T07:31:59,029 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5eb3d201e8c9,37793,1732519867685 2024-11-25T07:31:59,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741842_1018 (size=2026) 2024-11-25T07:31:59,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:31:59,031 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:31:59,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:59,031 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T07:31:59,031 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:31:59,031 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:31:59,032 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5eb3d201e8c9,34723,1732519867735' ***** 2024-11-25T07:31:59,032 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T07:31:59,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:31:59,032 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:31:59,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:59,032 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T07:31:59,032 INFO [RS:0;5eb3d201e8c9:34723 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T07:31:59,032 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T07:31:59,032 INFO [RS:0;5eb3d201e8c9:34723 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-25T07:31:59,032 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(3091): Received CLOSE for aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:59,032 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:31:59,032 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(959): stopping server 5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:59,032 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:31:59,032 INFO [RS:0;5eb3d201e8c9:34723 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5eb3d201e8c9:34723. 2024-11-25T07:31:59,033 DEBUG [RS:0;5eb3d201e8c9:34723 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:31:59,033 DEBUG [RS:0;5eb3d201e8c9:34723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:31:59,033 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing aee4071b6bb6842737b95c6ae9831dd6, disabling compactions & flushes 2024-11-25T07:31:59,033 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:59,033 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:59,033 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T07:31:59,033 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T07:31:59,033 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T07:31:59,033 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. after waiting 0 ms 2024-11-25T07:31:59,033 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:59,033 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T07:31:59,033 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing aee4071b6bb6842737b95c6ae9831dd6 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T07:31:59,033 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T07:31:59,033 DEBUG [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, aee4071b6bb6842737b95c6ae9831dd6=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.} 2024-11-25T07:31:59,033 DEBUG [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, aee4071b6bb6842737b95c6ae9831dd6 2024-11-25T07:31:59,033 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:31:59,033 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:31:59,033 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:31:59,033 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:31:59,033 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:31:59,033 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-25T07:31:59,037 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/324fcd1523e64a02991ced557ca83140 is 1080, key is row0001/info:/1732519919019/Put/seqid=0 2024-11-25T07:31:59,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741845_1021 (size=6033) 2024-11-25T07:31:59,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741845_1021 (size=6033) 2024-11-25T07:31:59,042 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/324fcd1523e64a02991ced557ca83140 2024-11-25T07:31:59,049 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/.tmp/info/324fcd1523e64a02991ced557ca83140 as hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/324fcd1523e64a02991ced557ca83140 2024-11-25T07:31:59,054 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/324fcd1523e64a02991ced557ca83140, entries=1, sequenceid=22, filesize=5.9 K 2024-11-25T07:31:59,055 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aee4071b6bb6842737b95c6ae9831dd6 in 22ms, sequenceid=22, compaction requested=true 2024-11-25T07:31:59,056 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/af47c96a295441ef981a4ea7e178792a, hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/22f3fd36b4694d29be6d9fc27e771a47, hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/2edf2cf7cc914bbca6c06ee815f7cbbc] to archive 2024-11-25T07:31:59,056 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/.tmp/info/611912e724a64f548408882ec78ac4ce is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6./info:regioninfo/1732519869122/Put/seqid=0 2024-11-25T07:31:59,057 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-25T07:31:59,058 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/af47c96a295441ef981a4ea7e178792a to hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/af47c96a295441ef981a4ea7e178792a 2024-11-25T07:31:59,060 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/22f3fd36b4694d29be6d9fc27e771a47 to hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/22f3fd36b4694d29be6d9fc27e771a47 2024-11-25T07:31:59,061 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/2edf2cf7cc914bbca6c06ee815f7cbbc to hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/info/2edf2cf7cc914bbca6c06ee815f7cbbc 2024-11-25T07:31:59,061 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5eb3d201e8c9:37793 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-25T07:31:59,061 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [af47c96a295441ef981a4ea7e178792a=6033, 22f3fd36b4694d29be6d9fc27e771a47=6033, 2edf2cf7cc914bbca6c06ee815f7cbbc=6033] 2024-11-25T07:31:59,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741846_1022 (size=7308) 2024-11-25T07:31:59,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741846_1022 (size=7308) 2024-11-25T07:31:59,067 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/.tmp/info/611912e724a64f548408882ec78ac4ce 2024-11-25T07:31:59,069 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/aee4071b6bb6842737b95c6ae9831dd6/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-25T07:31:59,070 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:59,070 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for aee4071b6bb6842737b95c6ae9831dd6: Waiting for close lock at 1732519919032Running coprocessor pre-close hooks at 1732519919032Disabling compacts and flushes for region at 1732519919033 (+1 ms)Disabling writes for close at 1732519919033Obtaining lock to block concurrent updates at 1732519919033Preparing flush snapshotting stores in aee4071b6bb6842737b95c6ae9831dd6 at 1732519919033Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732519919033Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 
at 1732519919034 (+1 ms)Flushing aee4071b6bb6842737b95c6ae9831dd6/info: creating writer at 1732519919034Flushing aee4071b6bb6842737b95c6ae9831dd6/info: appending metadata at 1732519919037 (+3 ms)Flushing aee4071b6bb6842737b95c6ae9831dd6/info: closing flushed file at 1732519919037Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f4a7530: reopening flushed file at 1732519919048 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for aee4071b6bb6842737b95c6ae9831dd6 in 22ms, sequenceid=22, compaction requested=true at 1732519919055 (+7 ms)Writing region close event to WAL at 1732519919066 (+11 ms)Running coprocessor post-close hooks at 1732519919070 (+4 ms)Closed at 1732519919070 2024-11-25T07:31:59,070 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732519868771.aee4071b6bb6842737b95c6ae9831dd6. 2024-11-25T07:31:59,086 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/.tmp/ns/056d16a25b1e4c2aaf4d02d53eeaa52e is 43, key is default/ns:d/1732519868687/Put/seqid=0 2024-11-25T07:31:59,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741847_1023 (size=5153) 2024-11-25T07:31:59,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741847_1023 (size=5153) 2024-11-25T07:31:59,092 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/.tmp/ns/056d16a25b1e4c2aaf4d02d53eeaa52e 2024-11-25T07:31:59,111 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/.tmp/table/42aebc778189409e8f73e1a22b30dd73 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732519869132/Put/seqid=0 2024-11-25T07:31:59,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741848_1024 (size=5508) 2024-11-25T07:31:59,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741848_1024 (size=5508) 2024-11-25T07:31:59,116 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/.tmp/table/42aebc778189409e8f73e1a22b30dd73 2024-11-25T07:31:59,122 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/.tmp/info/611912e724a64f548408882ec78ac4ce as 
hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/info/611912e724a64f548408882ec78ac4ce 2024-11-25T07:31:59,127 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/info/611912e724a64f548408882ec78ac4ce, entries=10, sequenceid=11, filesize=7.1 K 2024-11-25T07:31:59,129 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/.tmp/ns/056d16a25b1e4c2aaf4d02d53eeaa52e as hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/ns/056d16a25b1e4c2aaf4d02d53eeaa52e 2024-11-25T07:31:59,134 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/ns/056d16a25b1e4c2aaf4d02d53eeaa52e, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T07:31:59,135 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/.tmp/table/42aebc778189409e8f73e1a22b30dd73 as hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/table/42aebc778189409e8f73e1a22b30dd73 2024-11-25T07:31:59,140 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/table/42aebc778189409e8f73e1a22b30dd73, entries=2, sequenceid=11, filesize=5.4 K 2024-11-25T07:31:59,141 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false 2024-11-25T07:31:59,145 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T07:31:59,146 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:31:59,146 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:31:59,146 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519919033Running coprocessor pre-close hooks at 1732519919033Disabling compacts and flushes for region at 1732519919033Disabling writes for close at 1732519919033Obtaining lock to block concurrent updates at 1732519919033Preparing flush snapshotting stores in 1588230740 at 1732519919033Finished 
memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732519919034 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732519919034Flushing 1588230740/info: creating writer at 1732519919034Flushing 1588230740/info: appending metadata at 1732519919056 (+22 ms)Flushing 1588230740/info: closing flushed file at 1732519919056Flushing 1588230740/ns: creating writer at 1732519919072 (+16 ms)Flushing 1588230740/ns: appending metadata at 1732519919086 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732519919086Flushing 1588230740/table: creating writer at 1732519919097 (+11 ms)Flushing 1588230740/table: appending metadata at 1732519919110 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732519919111 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@128dfcf4: reopening flushed file at 1732519919122 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@95834e8: reopening flushed file at 1732519919127 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@442c04a0: reopening flushed file at 1732519919134 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false at 1732519919141 (+7 ms)Writing region close event to WAL at 1732519919142 (+1 ms)Running coprocessor post-close hooks at 1732519919146 (+4 ms)Closed at 1732519919146 2024-11-25T07:31:59,146 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T07:31:59,233 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(976): stopping server 5eb3d201e8c9,34723,1732519867735; all regions closed. 
2024-11-25T07:31:59,234 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,234 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,234 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,234 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,234 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741834_1010 (size=3306) 2024-11-25T07:31:59,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741834_1010 (size=3306) 2024-11-25T07:31:59,239 DEBUG [RS:0;5eb3d201e8c9:34723 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/oldWALs 2024-11-25T07:31:59,239 INFO [RS:0;5eb3d201e8c9:34723 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C34723%2C1732519867735.meta:.meta(num 1732519868649) 2024-11-25T07:31:59,239 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,239 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,239 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,239 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,239 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741844_1020 (size=1252) 2024-11-25T07:31:59,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741844_1020 (size=1252) 2024-11-25T07:31:59,244 DEBUG [RS:0;5eb3d201e8c9:34723 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/oldWALs 2024-11-25T07:31:59,244 INFO [RS:0;5eb3d201e8c9:34723 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C34723%2C1732519867735:(num 1732519919020) 2024-11-25T07:31:59,244 DEBUG [RS:0;5eb3d201e8c9:34723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:31:59,244 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:31:59,244 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:31:59,244 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.ChoreService(370): Chore service for: regionserver/5eb3d201e8c9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T07:31:59,245 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:31:59,245 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T07:31:59,245 INFO [RS:0;5eb3d201e8c9:34723 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34723 2024-11-25T07:31:59,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5eb3d201e8c9,34723,1732519867735 2024-11-25T07:31:59,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:31:59,247 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:31:59,248 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5eb3d201e8c9,34723,1732519867735] 2024-11-25T07:31:59,251 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5eb3d201e8c9,34723,1732519867735 already deleted, retry=false 2024-11-25T07:31:59,251 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5eb3d201e8c9,34723,1732519867735 expired; onlineServers=0 2024-11-25T07:31:59,251 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5eb3d201e8c9,37793,1732519867685' ***** 2024-11-25T07:31:59,251 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T07:31:59,251 INFO [M:0;5eb3d201e8c9:37793 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:31:59,251 INFO [M:0;5eb3d201e8c9:37793 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:31:59,251 DEBUG [M:0;5eb3d201e8c9:37793 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T07:31:59,251 DEBUG [M:0;5eb3d201e8c9:37793 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T07:31:59,251 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519867897 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519867897,5,FailOnTimeoutGroup] 2024-11-25T07:31:59,251 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519867897 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519867897,5,FailOnTimeoutGroup] 2024-11-25T07:31:59,251 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T07:31:59,251 INFO [M:0;5eb3d201e8c9:37793 {}] hbase.ChoreService(370): Chore service for: master/5eb3d201e8c9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T07:31:59,251 INFO [M:0;5eb3d201e8c9:37793 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:31:59,252 DEBUG [M:0;5eb3d201e8c9:37793 {}] master.HMaster(1795): Stopping service threads 2024-11-25T07:31:59,252 INFO [M:0;5eb3d201e8c9:37793 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T07:31:59,252 INFO [M:0;5eb3d201e8c9:37793 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:31:59,252 INFO [M:0;5eb3d201e8c9:37793 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T07:31:59,252 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T07:31:59,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T07:31:59,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:31:59,253 DEBUG [M:0;5eb3d201e8c9:37793 {}] zookeeper.ZKUtil(347): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T07:31:59,253 WARN [M:0;5eb3d201e8c9:37793 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T07:31:59,253 INFO [M:0;5eb3d201e8c9:37793 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/.lastflushedseqids 2024-11-25T07:31:59,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741849_1025 (size=130) 2024-11-25T07:31:59,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741849_1025 (size=130) 2024-11-25T07:31:59,259 INFO [M:0;5eb3d201e8c9:37793 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T07:31:59,259 INFO [M:0;5eb3d201e8c9:37793 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T07:31:59,259 DEBUG [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:31:59,260 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:31:59,260 DEBUG [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:31:59,260 DEBUG [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-25T07:31:59,260 DEBUG [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:31:59,260 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.61 KB heapSize=55.02 KB 2024-11-25T07:31:59,277 DEBUG [M:0;5eb3d201e8c9:37793 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/18c58905a8ad45668574a5a6bbe7006d is 82, key is hbase:meta,,1/info:regioninfo/1732519868672/Put/seqid=0 2024-11-25T07:31:59,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741850_1026 (size=5672) 2024-11-25T07:31:59,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741850_1026 (size=5672) 2024-11-25T07:31:59,282 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/18c58905a8ad45668574a5a6bbe7006d 2024-11-25T07:31:59,301 DEBUG [M:0;5eb3d201e8c9:37793 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/539a002740f24f50a0717e19733481b5 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732519869137/Put/seqid=0 2024-11-25T07:31:59,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741851_1027 (size=7825) 2024-11-25T07:31:59,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741851_1027 (size=7825) 2024-11-25T07:31:59,306 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.01 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/539a002740f24f50a0717e19733481b5 2024-11-25T07:31:59,310 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 539a002740f24f50a0717e19733481b5 2024-11-25T07:31:59,325 DEBUG [M:0;5eb3d201e8c9:37793 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/164636d5c6ea4a1fbf4a567a853b2e58 is 69, key is 5eb3d201e8c9,34723,1732519867735/rs:state/1732519867977/Put/seqid=0 2024-11-25T07:31:59,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741852_1028 (size=5156) 2024-11-25T07:31:59,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741852_1028 (size=5156) 2024-11-25T07:31:59,330 INFO [M:0;5eb3d201e8c9:37793 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/164636d5c6ea4a1fbf4a567a853b2e58 2024-11-25T07:31:59,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:31:59,349 INFO [RS:0;5eb3d201e8c9:34723 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:31:59,349 INFO [RS:0;5eb3d201e8c9:34723 {}] regionserver.HRegionServer(1031): Exiting; stopping=5eb3d201e8c9,34723,1732519867735; zookeeper connection closed. 2024-11-25T07:31:59,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34723-0x1014e091fa70001, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:31:59,349 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@51bfef5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@51bfef5 2024-11-25T07:31:59,349 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T07:31:59,350 DEBUG [M:0;5eb3d201e8c9:37793 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/10cebfa398404e618542a73d4ad6b705 is 52, key is load_balancer_on/state:d/1732519868768/Put/seqid=0 2024-11-25T07:31:59,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741853_1029 (size=5056) 2024-11-25T07:31:59,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741853_1029 (size=5056) 2024-11-25T07:31:59,355 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/10cebfa398404e618542a73d4ad6b705 2024-11-25T07:31:59,361 DEBUG [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/18c58905a8ad45668574a5a6bbe7006d as hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/18c58905a8ad45668574a5a6bbe7006d 2024-11-25T07:31:59,366 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/18c58905a8ad45668574a5a6bbe7006d, entries=8, sequenceid=121, filesize=5.5 K 2024-11-25T07:31:59,367 DEBUG [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/539a002740f24f50a0717e19733481b5 as hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/539a002740f24f50a0717e19733481b5 2024-11-25T07:31:59,371 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 539a002740f24f50a0717e19733481b5 2024-11-25T07:31:59,371 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/539a002740f24f50a0717e19733481b5, entries=14, sequenceid=121, filesize=7.6 K 2024-11-25T07:31:59,372 DEBUG [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/164636d5c6ea4a1fbf4a567a853b2e58 as hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/164636d5c6ea4a1fbf4a567a853b2e58 2024-11-25T07:31:59,376 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/164636d5c6ea4a1fbf4a567a853b2e58, entries=1, sequenceid=121, filesize=5.0 K 2024-11-25T07:31:59,377 DEBUG [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/10cebfa398404e618542a73d4ad6b705 as hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/10cebfa398404e618542a73d4ad6b705 2024-11-25T07:31:59,381 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/e1be01b8-428a-a094-5f4f-632b8dbf3c7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/10cebfa398404e618542a73d4ad6b705, entries=1, sequenceid=121, filesize=4.9 K 2024-11-25T07:31:59,382 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.61 KB/44656, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=121, compaction requested=false 2024-11-25T07:31:59,385 INFO [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T07:31:59,385 DEBUG [M:0;5eb3d201e8c9:37793 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519919259Disabling compacts and flushes for region at 1732519919259Disabling writes for close at 1732519919260 (+1 ms)Obtaining lock to block concurrent updates at 1732519919260Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732519919260Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44656, getHeapSize=56272, getOffHeapSize=0, getCellsCount=140 at 1732519919260Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732519919261 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732519919261Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732519919277 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732519919277Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732519919286 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732519919301 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732519919301Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732519919310 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732519919325 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732519919325Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732519919335 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732519919349 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732519919349Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3db43d49: reopening flushed file at 1732519919360 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b04fc9b: reopening flushed file at 1732519919366 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2580175e: reopening flushed file at 1732519919371 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5244054e: reopening flushed file at 1732519919376 (+5 ms)Finished flush of dataSize ~43.61 KB/44656, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=121, compaction requested=false at 1732519919382 (+6 ms)Writing region close event to WAL at 1732519919385 (+3 ms)Closed at 1732519919385 2024-11-25T07:31:59,385 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,385 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,385 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,386 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,386 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:31:59,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45489 is added to blk_1073741830_1006 (size=53053) 2024-11-25T07:31:59,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741830_1006 (size=53053) 2024-11-25T07:31:59,389 INFO [M:0;5eb3d201e8c9:37793 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-25T07:31:59,389 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T07:31:59,389 INFO [M:0;5eb3d201e8c9:37793 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37793 2024-11-25T07:31:59,389 INFO [M:0;5eb3d201e8c9:37793 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:31:59,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:31:59,491 INFO [M:0;5eb3d201e8c9:37793 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:31:59,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37793-0x1014e091fa70000, quorum=127.0.0.1:60919, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:31:59,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@770e751e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:31:59,494 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5828d739{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:31:59,494 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:31:59,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c1ed8d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:31:59,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e6bebf5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/hadoop.log.dir/,STOPPED} 2024-11-25T07:31:59,495 WARN [BP-1717048765-172.17.0.2-1732519866936 heartbeating to localhost/127.0.0.1:39459 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:31:59,495 WARN [BP-1717048765-172.17.0.2-1732519866936 heartbeating to localhost/127.0.0.1:39459 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1717048765-172.17.0.2-1732519866936 (Datanode Uuid 3b993c24-538d-4754-b757-0aece92ef136) service to localhost/127.0.0.1:39459 2024-11-25T07:31:59,495 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:31:59,496 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:31:59,496 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436/data/data3/current/BP-1717048765-172.17.0.2-1732519866936 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:31:59,496 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436/data/data4/current/BP-1717048765-172.17.0.2-1732519866936 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:31:59,496 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:31:59,498 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16a85bb6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:31:59,498 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5cac9c50{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:31:59,498 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:31:59,498 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ed77c81{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:31:59,499 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c243e85{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/hadoop.log.dir/,STOPPED} 2024-11-25T07:31:59,500 WARN [BP-1717048765-172.17.0.2-1732519866936 heartbeating to localhost/127.0.0.1:39459 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:31:59,500 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:31:59,500 WARN [BP-1717048765-172.17.0.2-1732519866936 heartbeating to localhost/127.0.0.1:39459 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1717048765-172.17.0.2-1732519866936 (Datanode Uuid e4ec1b39-39eb-43f0-9aad-5277b2fc31f8) service to localhost/127.0.0.1:39459 2024-11-25T07:31:59,500 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:31:59,501 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436/data/data1/current/BP-1717048765-172.17.0.2-1732519866936 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:31:59,501 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/cluster_6aac291b-a7c3-3ae9-e9e5-9d2903ea4436/data/data2/current/BP-1717048765-172.17.0.2-1732519866936 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:31:59,501 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:31:59,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1592e8d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:31:59,507 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68744dfe{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:31:59,507 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:31:59,508 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b93cee9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:31:59,508 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5838a3fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/hadoop.log.dir/,STOPPED} 2024-11-25T07:31:59,514 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T07:31:59,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T07:31:59,539 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39459 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39459 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:39459 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/5eb3d201e8c9:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:39459 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39459 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39459 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:39459 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:39459 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=61 (was 111), ProcessCount=11 (was 11), AvailableMemoryMB=7969 (was 8022) 2024-11-25T07:31:59,547 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=61, ProcessCount=11, AvailableMemoryMB=7970 2024-11-25T07:31:59,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T07:31:59,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/hadoop.log.dir so I do NOT create it in target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae 2024-11-25T07:31:59,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77a9ab12-64c9-db5c-bca9-0d8acd16be4b/hadoop.tmp.dir so I do NOT create it in target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae 2024-11-25T07:31:59,547 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d, deleteOnExit=true 2024-11-25T07:31:59,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T07:31:59,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/test.cache.data in system properties and HBase conf 2024-11-25T07:31:59,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T07:31:59,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/hadoop.log.dir in system properties and HBase conf 2024-11-25T07:31:59,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T07:31:59,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T07:31:59,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T07:31:59,548 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-25T07:31:59,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:31:59,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:31:59,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T07:31:59,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:31:59,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T07:31:59,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T07:31:59,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:31:59,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:31:59,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T07:31:59,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/nfs.dump.dir in system properties and HBase conf 2024-11-25T07:31:59,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/java.io.tmpdir in system properties and HBase conf 2024-11-25T07:31:59,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:31:59,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T07:31:59,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T07:31:59,562 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:31:59,624 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:31:59,628 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:31:59,629 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:31:59,629 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:31:59,629 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:31:59,629 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:31:59,630 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33382c80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:31:59,630 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5327e2a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:31:59,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1520cb76{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/java.io.tmpdir/jetty-localhost-33079-hadoop-hdfs-3_4_1-tests_jar-_-any-15477868602104880090/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:31:59,744 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65b546b1{HTTP/1.1, (http/1.1)}{localhost:33079} 2024-11-25T07:31:59,744 INFO [Time-limited test {}] server.Server(415): Started @239591ms 2024-11-25T07:31:59,757 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:31:59,860 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:31:59,863 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:31:59,877 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:31:59,877 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:31:59,877 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T07:31:59,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16ccf5f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:31:59,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2735da07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:31:59,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:31:59,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:31:59,990 INFO [regionserver/5eb3d201e8c9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:31:59,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f932cc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/java.io.tmpdir/jetty-localhost-41651-hadoop-hdfs-3_4_1-tests_jar-_-any-10723481418602495757/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:31:59,998 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ff7780b{HTTP/1.1, (http/1.1)}{localhost:41651} 2024-11-25T07:31:59,998 INFO [Time-limited test {}] server.Server(415): Started @239845ms 2024-11-25T07:31:59,999 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:32:00,028 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:32:00,031 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:32:00,032 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:32:00,032 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:32:00,032 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:32:00,032 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25dcc129{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:32:00,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26a9d62d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:32:00,100 WARN [Thread-1957 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d/data/data1/current/BP-1014115906-172.17.0.2-1732519919568/current, will proceed with Du for space computation calculation, 2024-11-25T07:32:00,100 WARN [Thread-1958 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d/data/data2/current/BP-1014115906-172.17.0.2-1732519919568/current, will proceed with Du for space computation calculation, 2024-11-25T07:32:00,117 WARN [Thread-1936 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T07:32:00,120 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdde0d6d9626be7d8 with lease ID 0x132c02faac2c9c97: Processing first storage report for DS-2df08e7d-375e-4404-8d4d-3c2defc4c788 from datanode DatanodeRegistration(127.0.0.1:35065, datanodeUuid=4911d89d-b9a8-4853-9f1e-54d200d1830d, infoPort=39055, infoSecurePort=0, ipcPort=44589, storageInfo=lv=-57;cid=testClusterID;nsid=1220531591;c=1732519919568) 2024-11-25T07:32:00,120 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdde0d6d9626be7d8 with lease ID 0x132c02faac2c9c97: from storage DS-2df08e7d-375e-4404-8d4d-3c2defc4c788 node DatanodeRegistration(127.0.0.1:35065, datanodeUuid=4911d89d-b9a8-4853-9f1e-54d200d1830d, infoPort=39055, infoSecurePort=0, ipcPort=44589, storageInfo=lv=-57;cid=testClusterID;nsid=1220531591;c=1732519919568), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T07:32:00,120 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdde0d6d9626be7d8 with lease ID 0x132c02faac2c9c97: Processing first storage report for DS-87849bc7-9dbe-4f2f-bceb-f2f97d80ecdb from datanode DatanodeRegistration(127.0.0.1:35065, datanodeUuid=4911d89d-b9a8-4853-9f1e-54d200d1830d, infoPort=39055, infoSecurePort=0, ipcPort=44589, storageInfo=lv=-57;cid=testClusterID;nsid=1220531591;c=1732519919568) 2024-11-25T07:32:00,120 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdde0d6d9626be7d8 with lease ID 0x132c02faac2c9c97: from storage DS-87849bc7-9dbe-4f2f-bceb-f2f97d80ecdb node DatanodeRegistration(127.0.0.1:35065, datanodeUuid=4911d89d-b9a8-4853-9f1e-54d200d1830d, infoPort=39055, infoSecurePort=0, ipcPort=44589, storageInfo=lv=-57;cid=testClusterID;nsid=1220531591;c=1732519919568), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:32:00,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ae6275a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/java.io.tmpdir/jetty-localhost-36429-hadoop-hdfs-3_4_1-tests_jar-_-any-6208104260629658919/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:32:00,150 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a34980e{HTTP/1.1, (http/1.1)}{localhost:36429} 2024-11-25T07:32:00,150 INFO [Time-limited test {}] server.Server(415): Started @239997ms 2024-11-25T07:32:00,151 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T07:32:00,254 WARN [Thread-1983 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d/data/data3/current/BP-1014115906-172.17.0.2-1732519919568/current, will proceed with Du for space computation calculation, 2024-11-25T07:32:00,254 WARN [Thread-1984 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d/data/data4/current/BP-1014115906-172.17.0.2-1732519919568/current, will proceed with Du for space computation calculation, 2024-11-25T07:32:00,271 WARN [Thread-1972 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T07:32:00,273 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc85c772d39aaf770 with lease ID 0x132c02faac2c9c98: Processing first storage report for DS-e5d04b08-a0bc-4e2a-805e-e157111c7ded from datanode DatanodeRegistration(127.0.0.1:35031, datanodeUuid=cf91bf23-96cc-46eb-97b3-269ddd361ff1, infoPort=36405, infoSecurePort=0, ipcPort=38877, storageInfo=lv=-57;cid=testClusterID;nsid=1220531591;c=1732519919568) 2024-11-25T07:32:00,273 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc85c772d39aaf770 with lease ID 0x132c02faac2c9c98: from storage DS-e5d04b08-a0bc-4e2a-805e-e157111c7ded node DatanodeRegistration(127.0.0.1:35031, datanodeUuid=cf91bf23-96cc-46eb-97b3-269ddd361ff1, infoPort=36405, infoSecurePort=0, ipcPort=38877, storageInfo=lv=-57;cid=testClusterID;nsid=1220531591;c=1732519919568), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:32:00,273 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc85c772d39aaf770 with lease ID 0x132c02faac2c9c98: Processing first storage report for DS-2e02fc86-f543-4d28-a9d6-33bd7a475234 from datanode DatanodeRegistration(127.0.0.1:35031, datanodeUuid=cf91bf23-96cc-46eb-97b3-269ddd361ff1, infoPort=36405, infoSecurePort=0, ipcPort=38877, storageInfo=lv=-57;cid=testClusterID;nsid=1220531591;c=1732519919568) 2024-11-25T07:32:00,273 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc85c772d39aaf770 with lease ID 0x132c02faac2c9c98: from storage DS-2e02fc86-f543-4d28-a9d6-33bd7a475234 node DatanodeRegistration(127.0.0.1:35031, datanodeUuid=cf91bf23-96cc-46eb-97b3-269ddd361ff1, infoPort=36405, infoSecurePort=0, ipcPort=38877, storageInfo=lv=-57;cid=testClusterID;nsid=1220531591;c=1732519919568), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:32:00,374 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae 2024-11-25T07:32:00,377 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d/zookeeper_0, clientPort=55992, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T07:32:00,378 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55992 2024-11-25T07:32:00,379 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:00,380 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:00,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:32:00,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:32:00,389 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f with version=8 2024-11-25T07:32:00,389 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/hbase-staging 2024-11-25T07:32:00,391 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:32:00,391 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:00,391 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:00,391 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:32:00,391 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:00,391 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:32:00,391 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T07:32:00,391 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:32:00,392 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41919 2024-11-25T07:32:00,393 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41919 connecting to ZooKeeper ensemble=127.0.0.1:55992 2024-11-25T07:32:00,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:419190x0, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:32:00,399 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41919-0x1014e09ed880000 connected 2024-11-25T07:32:00,415 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:00,416 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:00,418 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:32:00,418 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f, hbase.cluster.distributed=false 2024-11-25T07:32:00,420 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:32:00,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41919 2024-11-25T07:32:00,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41919 2024-11-25T07:32:00,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41919 2024-11-25T07:32:00,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41919 2024-11-25T07:32:00,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41919 2024-11-25T07:32:00,437 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:32:00,437 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:00,437 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:00,437 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:32:00,437 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:00,437 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:32:00,437 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T07:32:00,438 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:32:00,438 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33373 2024-11-25T07:32:00,440 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33373 connecting to ZooKeeper ensemble=127.0.0.1:55992 2024-11-25T07:32:00,440 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:00,442 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:00,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:333730x0, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:32:00,446 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33373-0x1014e09ed880001 connected 2024-11-25T07:32:00,446 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:32:00,446 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T07:32:00,450 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T07:32:00,451 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T07:32:00,452 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:32:00,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33373 2024-11-25T07:32:00,454 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33373 2024-11-25T07:32:00,454 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33373 2024-11-25T07:32:00,455 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33373 2024-11-25T07:32:00,455 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33373 
2024-11-25T07:32:00,467 DEBUG [M:0;5eb3d201e8c9:41919 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5eb3d201e8c9:41919 2024-11-25T07:32:00,468 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5eb3d201e8c9,41919,1732519920390 2024-11-25T07:32:00,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:32:00,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:32:00,470 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5eb3d201e8c9,41919,1732519920390 2024-11-25T07:32:00,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T07:32:00,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:00,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:00,474 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T07:32:00,474 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5eb3d201e8c9,41919,1732519920390 from backup master directory 2024-11-25T07:32:00,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5eb3d201e8c9,41919,1732519920390 2024-11-25T07:32:00,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:32:00,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:32:00,475 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-25T07:32:00,475 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5eb3d201e8c9,41919,1732519920390 2024-11-25T07:32:00,479 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/hbase.id] with ID: 56572839-9a99-446e-bb89-96680fe93fa3 2024-11-25T07:32:00,479 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/.tmp/hbase.id 2024-11-25T07:32:00,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:32:00,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741826_1002 (size=42) 2024-11-25T07:32:00,486 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/.tmp/hbase.id]:[hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/hbase.id] 2024-11-25T07:32:00,496 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:00,496 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T07:32:00,497 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-25T07:32:00,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:00,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:00,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:32:00,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:32:00,508 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:32:00,509 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T07:32:00,509 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:32:00,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:32:00,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:32:00,517 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store 2024-11-25T07:32:00,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:32:00,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:32:00,526 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:32:00,527 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:32:00,527 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:32:00,527 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:32:00,527 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:32:00,527 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:32:00,527 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T07:32:00,527 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519920527Disabling compacts and flushes for region at 1732519920527Disabling writes for close at 1732519920527Writing region close event to WAL at 1732519920527Closed at 1732519920527 2024-11-25T07:32:00,528 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/.initializing 2024-11-25T07:32:00,528 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/WALs/5eb3d201e8c9,41919,1732519920390 2024-11-25T07:32:00,530 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C41919%2C1732519920390, suffix=, logDir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/WALs/5eb3d201e8c9,41919,1732519920390, archiveDir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/oldWALs, maxLogs=10 2024-11-25T07:32:00,530 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C41919%2C1732519920390.1732519920530 2024-11-25T07:32:00,535 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/WALs/5eb3d201e8c9,41919,1732519920390/5eb3d201e8c9%2C41919%2C1732519920390.1732519920530 2024-11-25T07:32:00,536 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36405:36405),(127.0.0.1/127.0.0.1:39055:39055)] 2024-11-25T07:32:00,540 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:32:00,540 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:32:00,540 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,540 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,544 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,545 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T07:32:00,545 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:00,546 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:00,546 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,547 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T07:32:00,547 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:00,547 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:32:00,548 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,548 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T07:32:00,549 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:00,549 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:32:00,549 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,550 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T07:32:00,550 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:00,551 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:32:00,551 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,551 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,551 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,553 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,553 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,553 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T07:32:00,555 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:00,557 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:32:00,557 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878932, jitterRate=0.11762085556983948}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T07:32:00,558 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732519920541Initializing all the Stores at 1732519920541Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519920541Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519920544 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519920544Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519920544Cleaning up temporary data from old regions at 1732519920553 (+9 ms)Region opened successfully at 1732519920558 (+5 ms) 2024-11-25T07:32:00,558 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T07:32:00,562 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527e4c4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:32:00,563 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T07:32:00,563 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T07:32:00,563 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T07:32:00,563 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T07:32:00,564 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T07:32:00,564 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T07:32:00,564 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T07:32:00,566 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T07:32:00,567 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T07:32:00,568 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T07:32:00,569 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T07:32:00,569 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T07:32:00,572 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T07:32:00,572 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T07:32:00,573 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T07:32:00,574 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T07:32:00,575 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T07:32:00,576 DEBUG 
[master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T07:32:00,578 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T07:32:00,579 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T07:32:00,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:32:00,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:32:00,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:00,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:00,583 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5eb3d201e8c9,41919,1732519920390, sessionid=0x1014e09ed880000, setting cluster-up flag (Was=false) 2024-11-25T07:32:00,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:00,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:00,593 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T07:32:00,594 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,41919,1732519920390 2024-11-25T07:32:00,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:00,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:00,603 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T07:32:00,604 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,41919,1732519920390 2024-11-25T07:32:00,606 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T07:32:00,607 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T07:32:00,608 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T07:32:00,608 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T07:32:00,608 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5eb3d201e8c9,41919,1732519920390 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T07:32:00,610 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:32:00,610 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:32:00,610 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:32:00,610 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:32:00,610 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5eb3d201e8c9:0, corePoolSize=10, maxPoolSize=10 2024-11-25T07:32:00,610 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,610 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:32:00,610 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T07:32:00,612 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732519950612 2024-11-25T07:32:00,612 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T07:32:00,612 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T07:32:00,612 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T07:32:00,612 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T07:32:00,612 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T07:32:00,612 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T07:32:00,612 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:32:00,612 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T07:32:00,612 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-25T07:32:00,613 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T07:32:00,613 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T07:32:00,613 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T07:32:00,614 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:00,614 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T07:32:00,614 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T07:32:00,614 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T07:32:00,614 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519920614,5,FailOnTimeoutGroup] 2024-11-25T07:32:00,615 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519920615,5,FailOnTimeoutGroup] 2024-11-25T07:32:00,615 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:00,615 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T07:32:00,615 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:00,615 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:00,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:32:00,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:32:00,626 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T07:32:00,626 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f 2024-11-25T07:32:00,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:32:00,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:32:00,634 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:32:00,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:32:00,636 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:32:00,637 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:00,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:00,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:32:00,638 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:32:00,638 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:00,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:00,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:32:00,639 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:32:00,639 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:00,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:00,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:32:00,641 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:32:00,641 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:00,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:00,641 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:32:00,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740 2024-11-25T07:32:00,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740 2024-11-25T07:32:00,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:32:00,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:32:00,644 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T07:32:00,645 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:32:00,646 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:32:00,647 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730325, jitterRate=-0.07134406268596649}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:32:00,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732519920634Initializing all the Stores at 1732519920634Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519920634Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519920635 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519920635Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519920635Cleaning up temporary data from old regions at 1732519920643 (+8 ms)Region opened successfully at 1732519920647 (+4 ms) 2024-11-25T07:32:00,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:32:00,647 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:32:00,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:32:00,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:32:00,648 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:32:00,648 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:32:00,648 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519920647Disabling compacts and flushes for region at 
1732519920647Disabling writes for close at 1732519920648 (+1 ms)Writing region close event to WAL at 1732519920648Closed at 1732519920648 2024-11-25T07:32:00,649 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:32:00,649 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T07:32:00,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T07:32:00,651 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:32:00,652 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T07:32:00,657 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(746): ClusterId : 56572839-9a99-446e-bb89-96680fe93fa3 2024-11-25T07:32:00,657 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T07:32:00,659 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T07:32:00,659 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T07:32:00,662 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T07:32:00,662 DEBUG [RS:0;5eb3d201e8c9:33373 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dde65c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:32:00,674 DEBUG [RS:0;5eb3d201e8c9:33373 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5eb3d201e8c9:33373 2024-11-25T07:32:00,674 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T07:32:00,674 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T07:32:00,674 DEBUG [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T07:32:00,675 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(2659): reportForDuty to master=5eb3d201e8c9,41919,1732519920390 with port=33373, startcode=1732519920437 2024-11-25T07:32:00,675 DEBUG [RS:0;5eb3d201e8c9:33373 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T07:32:00,677 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35681, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T07:32:00,678 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41919 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:00,678 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41919 {}] master.ServerManager(517): Registering regionserver=5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:00,679 DEBUG [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f 2024-11-25T07:32:00,679 DEBUG [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37267 2024-11-25T07:32:00,679 DEBUG [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T07:32:00,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:32:00,681 DEBUG [RS:0;5eb3d201e8c9:33373 {}] zookeeper.ZKUtil(111): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:00,682 WARN [RS:0;5eb3d201e8c9:33373 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T07:32:00,682 INFO [RS:0;5eb3d201e8c9:33373 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:32:00,682 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5eb3d201e8c9,33373,1732519920437] 2024-11-25T07:32:00,682 DEBUG [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:00,685 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T07:32:00,687 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T07:32:00,688 INFO [RS:0;5eb3d201e8c9:33373 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T07:32:00,688 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T07:32:00,690 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T07:32:00,691 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T07:32:00,691 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:00,691 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:32:00,692 DEBUG [RS:0;5eb3d201e8c9:33373 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:32:00,692 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T07:32:00,692 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:00,692 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:00,692 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:00,692 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:00,692 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,33373,1732519920437-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:32:00,707 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T07:32:00,707 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,33373,1732519920437-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:00,708 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:00,708 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.Replication(171): 5eb3d201e8c9,33373,1732519920437 started 2024-11-25T07:32:00,722 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:00,722 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(1482): Serving as 5eb3d201e8c9,33373,1732519920437, RpcServer on 5eb3d201e8c9/172.17.0.2:33373, sessionid=0x1014e09ed880001 2024-11-25T07:32:00,723 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T07:32:00,723 DEBUG [RS:0;5eb3d201e8c9:33373 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:00,723 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,33373,1732519920437' 2024-11-25T07:32:00,723 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T07:32:00,723 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T07:32:00,724 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T07:32:00,724 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T07:32:00,724 DEBUG [RS:0;5eb3d201e8c9:33373 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:00,724 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,33373,1732519920437' 2024-11-25T07:32:00,724 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T07:32:00,724 DEBUG 
[RS:0;5eb3d201e8c9:33373 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T07:32:00,724 DEBUG [RS:0;5eb3d201e8c9:33373 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T07:32:00,725 INFO [RS:0;5eb3d201e8c9:33373 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T07:32:00,725 INFO [RS:0;5eb3d201e8c9:33373 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T07:32:00,802 WARN [5eb3d201e8c9:41919 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T07:32:00,826 INFO [RS:0;5eb3d201e8c9:33373 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C33373%2C1732519920437, suffix=, logDir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437, archiveDir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/oldWALs, maxLogs=32 2024-11-25T07:32:00,827 INFO [RS:0;5eb3d201e8c9:33373 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C33373%2C1732519920437.1732519920827 2024-11-25T07:32:00,833 INFO [RS:0;5eb3d201e8c9:33373 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437/5eb3d201e8c9%2C33373%2C1732519920437.1732519920827 2024-11-25T07:32:00,834 DEBUG [RS:0;5eb3d201e8c9:33373 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36405:36405),(127.0.0.1/127.0.0.1:39055:39055)] 2024-11-25T07:32:00,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:00,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:01,052 DEBUG [5eb3d201e8c9:41919 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T07:32:01,053 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:01,054 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,33373,1732519920437, state=OPENING 2024-11-25T07:32:01,055 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T07:32:01,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:01,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:01,057 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:32:01,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:32:01,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:32:01,057 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,33373,1732519920437}] 2024-11-25T07:32:01,210 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T07:32:01,212 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47533, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T07:32:01,215 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T07:32:01,215 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:32:01,217 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C33373%2C1732519920437.meta, suffix=.meta, logDir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437, archiveDir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/oldWALs, maxLogs=32 2024-11-25T07:32:01,218 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C33373%2C1732519920437.meta.1732519921217.meta 2024-11-25T07:32:01,223 INFO 
[RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437/5eb3d201e8c9%2C33373%2C1732519920437.meta.1732519921217.meta 2024-11-25T07:32:01,224 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39055:39055),(127.0.0.1/127.0.0.1:36405:36405)] 2024-11-25T07:32:01,229 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:32:01,230 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T07:32:01,230 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T07:32:01,230 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-25T07:32:01,230 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T07:32:01,230 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:32:01,230 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T07:32:01,230 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T07:32:01,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:32:01,232 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:32:01,232 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:01,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:01,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:32:01,234 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:32:01,234 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:01,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:01,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:32:01,235 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:32:01,235 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:01,235 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:01,235 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:32:01,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:32:01,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:01,236 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:01,236 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:32:01,237 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740 2024-11-25T07:32:01,238 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740 2024-11-25T07:32:01,239 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:32:01,239 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:32:01,239 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T07:32:01,241 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:32:01,242 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838386, jitterRate=0.06606373190879822}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:32:01,242 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T07:32:01,242 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732519921230Writing region info on filesystem at 1732519921230Initializing all the Stores at 1732519921231 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519921231Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519921232 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519921232Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519921232Cleaning up temporary data from old regions at 1732519921239 (+7 ms)Running coprocessor post-open hooks at 1732519921242 (+3 ms)Region opened successfully at 1732519921242 2024-11-25T07:32:01,243 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732519921210 2024-11-25T07:32:01,245 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T07:32:01,245 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T07:32:01,246 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:01,247 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,33373,1732519920437, state=OPEN 2024-11-25T07:32:01,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:32:01,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:32:01,252 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:01,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:32:01,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:32:01,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T07:32:01,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,33373,1732519920437 in 195 msec 2024-11-25T07:32:01,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T07:32:01,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-11-25T07:32:01,259 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:32:01,259 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T07:32:01,260 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:32:01,260 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,33373,1732519920437, seqNum=-1] 2024-11-25T07:32:01,260 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:32:01,261 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55047, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:32:01,266 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 659 msec 2024-11-25T07:32:01,266 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732519921266, completionTime=-1 2024-11-25T07:32:01,266 INFO 
[master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T07:32:01,266 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T07:32:01,268 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T07:32:01,268 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732519981268 2024-11-25T07:32:01,268 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732520041268 2024-11-25T07:32:01,268 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-25T07:32:01,269 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41919,1732519920390-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:01,269 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41919,1732519920390-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:01,269 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41919,1732519920390-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:01,269 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5eb3d201e8c9:41919, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:01,269 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:01,269 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:01,270 DEBUG [master/5eb3d201e8c9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T07:32:01,272 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.797sec 2024-11-25T07:32:01,273 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T07:32:01,273 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T07:32:01,273 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T07:32:01,273 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-25T07:32:01,273 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T07:32:01,273 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41919,1732519920390-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:32:01,273 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41919,1732519920390-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T07:32:01,275 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T07:32:01,275 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T07:32:01,275 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,41919,1732519920390-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:01,357 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b81e396, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:32:01,357 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5eb3d201e8c9,41919,-1 for getting cluster id 2024-11-25T07:32:01,357 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T07:32:01,359 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '56572839-9a99-446e-bb89-96680fe93fa3' 2024-11-25T07:32:01,359 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T07:32:01,359 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "56572839-9a99-446e-bb89-96680fe93fa3" 2024-11-25T07:32:01,359 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fefeea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:32:01,359 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5eb3d201e8c9,41919,-1] 2024-11-25T07:32:01,360 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T07:32:01,360 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:32:01,361 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36246, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T07:32:01,362 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@196c243a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:32:01,362 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:32:01,363 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,33373,1732519920437, seqNum=-1] 2024-11-25T07:32:01,363 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:32:01,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56934, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:32:01,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5eb3d201e8c9,41919,1732519920390 2024-11-25T07:32:01,366 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:01,368 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T07:32:01,368 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T07:32:01,369 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 5eb3d201e8c9,41919,1732519920390 2024-11-25T07:32:01,369 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@10dedffc 2024-11-25T07:32:01,369 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T07:32:01,370 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36260, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T07:32:01,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41919 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T07:32:01,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41919 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-25T07:32:01,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41919 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:32:01,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41919 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-25T07:32:01,373 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T07:32:01,373 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:01,373 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41919 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-25T07:32:01,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T07:32:01,374 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T07:32:01,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741835_1011 (size=381) 2024-11-25T07:32:01,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741835_1011 (size=381) 2024-11-25T07:32:01,382 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 991144de426cafcdf6a39be16db812fb, NAME => 'TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f 2024-11-25T07:32:01,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741836_1012 (size=64) 2024-11-25T07:32:01,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741836_1012 (size=64) 2024-11-25T07:32:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:32:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 991144de426cafcdf6a39be16db812fb, disabling compactions & flushes 2024-11-25T07:32:01,388 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. 2024-11-25T07:32:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. 2024-11-25T07:32:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. after waiting 0 ms 2024-11-25T07:32:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. 2024-11-25T07:32:01,388 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. 2024-11-25T07:32:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 991144de426cafcdf6a39be16db812fb: Waiting for close lock at 1732519921388Disabling compacts and flushes for region at 1732519921388Disabling writes for close at 1732519921388Writing region close event to WAL at 1732519921388Closed at 1732519921388 2024-11-25T07:32:01,390 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T07:32:01,390 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732519921390"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732519921390"}]},"ts":"1732519921390"} 2024-11-25T07:32:01,392 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-25T07:32:01,393 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T07:32:01,393 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732519921393"}]},"ts":"1732519921393"} 2024-11-25T07:32:01,395 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-25T07:32:01,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=991144de426cafcdf6a39be16db812fb, ASSIGN}] 2024-11-25T07:32:01,396 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=991144de426cafcdf6a39be16db812fb, ASSIGN 2024-11-25T07:32:01,397 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=991144de426cafcdf6a39be16db812fb, ASSIGN; state=OFFLINE, location=5eb3d201e8c9,33373,1732519920437; forceNewPlan=false, retain=false 2024-11-25T07:32:01,548 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=991144de426cafcdf6a39be16db812fb, regionState=OPENING, regionLocation=5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:01,550 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=991144de426cafcdf6a39be16db812fb, ASSIGN because future has completed 2024-11-25T07:32:01,551 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 991144de426cafcdf6a39be16db812fb, server=5eb3d201e8c9,33373,1732519920437}] 2024-11-25T07:32:01,707 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. 
2024-11-25T07:32:01,708 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 991144de426cafcdf6a39be16db812fb, NAME => 'TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:32:01,708 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:01,708 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:32:01,708 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:01,708 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:01,709 INFO [StoreOpener-991144de426cafcdf6a39be16db812fb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:01,710 INFO [StoreOpener-991144de426cafcdf6a39be16db812fb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 991144de426cafcdf6a39be16db812fb columnFamilyName info 2024-11-25T07:32:01,711 DEBUG [StoreOpener-991144de426cafcdf6a39be16db812fb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:01,711 INFO [StoreOpener-991144de426cafcdf6a39be16db812fb-1 {}] regionserver.HStore(327): Store=991144de426cafcdf6a39be16db812fb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:32:01,711 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:01,712 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:01,712 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:01,713 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:01,713 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:01,714 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:01,716 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:32:01,716 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 991144de426cafcdf6a39be16db812fb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705869, jitterRate=-0.10244210064411163}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T07:32:01,716 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:01,717 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 991144de426cafcdf6a39be16db812fb: Running coprocessor pre-open hook at 1732519921708Writing region info on filesystem at 1732519921708Initializing all the Stores at 1732519921709 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519921709Cleaning up temporary data from old regions at 1732519921713 (+4 ms)Running coprocessor post-open hooks at 1732519921716 (+3 ms)Region opened successfully at 1732519921717 (+1 ms) 2024-11-25T07:32:01,718 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., pid=6, masterSystemTime=1732519921704 2024-11-25T07:32:01,720 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. 
2024-11-25T07:32:01,720 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. 2024-11-25T07:32:01,721 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=991144de426cafcdf6a39be16db812fb, regionState=OPEN, openSeqNum=2, regionLocation=5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:01,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 991144de426cafcdf6a39be16db812fb, server=5eb3d201e8c9,33373,1732519920437 because future has completed 2024-11-25T07:32:01,726 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T07:32:01,726 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 991144de426cafcdf6a39be16db812fb, server=5eb3d201e8c9,33373,1732519920437 in 173 msec 2024-11-25T07:32:01,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T07:32:01,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=991144de426cafcdf6a39be16db812fb, ASSIGN in 331 msec 2024-11-25T07:32:01,730 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T07:32:01,730 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732519921730"}]},"ts":"1732519921730"} 2024-11-25T07:32:01,733 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-25T07:32:01,734 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T07:32:01,736 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 363 msec 2024-11-25T07:32:01,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:01,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
[The same "Failed invocation" WARN from Close-WAL-Writer-0, with an identical InvocationTargetException stack trace caused by "java.io.IOException: Filesystem closed", recurs roughly once per second for both WAL files (5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 and 5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta) at 07:32:02,887; 02,895; 03,887; 03,896; 04,888; 04,896; 05,888; 05,896; 06,889; 06,897; 07,890; 07,897; 08,890; 08,898; 09,891; and 09,898. The duplicate traces are omitted here.]
2024-11-25T07:32:04,070 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[This identical WARN repeats between 07:32:04,070 and 07:32:04,092 and again between 07:32:04,597 and 07:32:04,625; the duplicates are omitted here.]
2024-11-25T07:32:04,596 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-25T07:32:05,761 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-11-25T07:32:05,762 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-11-25T07:32:05,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-25T07:32:06,686 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-25T07:32:06,686 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
2024-11-25T07:32:10,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:10,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:11,264 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T07:32:11,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,288 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,288 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,288 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:11,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-25T07:32:11,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41919 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-25T07:32:11,437 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-25T07:32:11,437 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-25T07:32:11,440 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-25T07:32:11,440 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.
2024-11-25T07:32:11,442 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=2]
2024-11-25T07:32:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 991144de426cafcdf6a39be16db812fb
2024-11-25T07:32:11,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 991144de426cafcdf6a39be16db812fb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-25T07:32:11,470 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/36beda8ccb6c4fe5a9f0499568682d9c is 1080, key is row0001/info:/1732519931443/Put/seqid=0
2024-11-25T07:32:11,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741837_1013 (size=12509)
2024-11-25T07:32:11,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741837_1013 (size=12509)
2024-11-25T07:32:11,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/36beda8ccb6c4fe5a9f0499568682d9c
2024-11-25T07:32:11,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/36beda8ccb6c4fe5a9f0499568682d9c as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/36beda8ccb6c4fe5a9f0499568682d9c
2024-11-25T07:32:11,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=991144de426cafcdf6a39be16db812fb, server=5eb3d201e8c9,33373,1732519920437
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-25T07:32:11,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/36beda8ccb6c4fe5a9f0499568682d9c, entries=7, sequenceid=11, filesize=12.2 K
2024-11-25T07:32:11,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 991144de426cafcdf6a39be16db812fb in 38ms, sequenceid=11, compaction requested=false
2024-11-25T07:32:11,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 991144de426cafcdf6a39be16db812fb:
2024-11-25T07:32:11,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56934 deadline: 1732519941488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=991144de426cafcdf6a39be16db812fb, server=5eb3d201e8c9,33373,1732519920437
2024-11-25T07:32:11,511 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=991144de426cafcdf6a39be16db812fb, server=5eb3d201e8c9,33373,1732519920437 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T07:32:11,512 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=991144de426cafcdf6a39be16db812fb, server=5eb3d201e8c9,33373,1732519920437 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T07:32:11,512 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=2 because the exception is null or not the one we care about 2024-11-25T07:32:11,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:11,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:12,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:12,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:13,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:13,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:14,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:14,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:15,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:15,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:16,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:16,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:17,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:17,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:18,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:18,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:19,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:19,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:20,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:20,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:21,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:21,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 991144de426cafcdf6a39be16db812fb 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-25T07:32:21,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/42bacba6a9924f7c9be58568e087e365 is 1080, key is row0008/info:/1732519931454/Put/seqid=0 2024-11-25T07:32:21,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741838_1014 (size=29761) 2024-11-25T07:32:21,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741838_1014 (size=29761) 2024-11-25T07:32:21,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/42bacba6a9924f7c9be58568e087e365 2024-11-25T07:32:21,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/42bacba6a9924f7c9be58568e087e365 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/42bacba6a9924f7c9be58568e087e365 2024-11-25T07:32:21,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/42bacba6a9924f7c9be58568e087e365, entries=23, sequenceid=37, filesize=29.1 K 2024-11-25T07:32:21,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 991144de426cafcdf6a39be16db812fb in 21ms, sequenceid=37, compaction requested=false 2024-11-25T07:32:21,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 991144de426cafcdf6a39be16db812fb: 2024-11-25T07:32:21,550 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-25T07:32:21,550 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:32:21,550 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/42bacba6a9924f7c9be58568e087e365 because midkey is the same as first or last row 2024-11-25T07:32:21,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:21,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:22,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:22,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-25T07:32:23,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 991144de426cafcdf6a39be16db812fb
2024-11-25T07:32:23,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 991144de426cafcdf6a39be16db812fb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-25T07:32:23,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/364c90bc524f49b689f1d20521c90b03 is 1080, key is row0031/info:/1732519941530/Put/seqid=0
2024-11-25T07:32:23,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741839_1015 (size=12509)
2024-11-25T07:32:23,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741839_1015 (size=12509)
2024-11-25T07:32:23,552 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/364c90bc524f49b689f1d20521c90b03
2024-11-25T07:32:23,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/364c90bc524f49b689f1d20521c90b03 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/364c90bc524f49b689f1d20521c90b03
2024-11-25T07:32:23,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/364c90bc524f49b689f1d20521c90b03, entries=7, sequenceid=47, filesize=12.2 K
2024-11-25T07:32:23,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 991144de426cafcdf6a39be16db812fb in 22ms, sequenceid=47, compaction requested=true
2024-11-25T07:32:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 991144de426cafcdf6a39be16db812fb:
2024-11-25T07:32:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K
2024-11-25T07:32:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-25T07:32:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/42bacba6a9924f7c9be58568e087e365 because midkey is the same as first or last row
2024-11-25T07:32:23,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 991144de426cafcdf6a39be16db812fb:info, priority=-2147483648, current under compaction store size is 1
2024-11-25T07:32:23,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T07:32:23,564 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-25T07:32:23,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 991144de426cafcdf6a39be16db812fb
2024-11-25T07:32:23,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 991144de426cafcdf6a39be16db812fb 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-25T07:32:23,565 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-25T07:32:23,565 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1541): 991144de426cafcdf6a39be16db812fb/info is initiating minor compaction (all files)
2024-11-25T07:32:23,565 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 991144de426cafcdf6a39be16db812fb/info in TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.
2024-11-25T07:32:23,565 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/36beda8ccb6c4fe5a9f0499568682d9c, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/42bacba6a9924f7c9be58568e087e365, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/364c90bc524f49b689f1d20521c90b03] into tmpdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp, totalSize=53.5 K
2024-11-25T07:32:23,565 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 36beda8ccb6c4fe5a9f0499568682d9c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732519931443
2024-11-25T07:32:23,566 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 42bacba6a9924f7c9be58568e087e365, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732519931454
2024-11-25T07:32:23,566 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 364c90bc524f49b689f1d20521c90b03, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732519941530
2024-11-25T07:32:23,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/56bf4e7c036a4393a1fdc56411a4921f is 1080, key is row0038/info:/1732519943542/Put/seqid=0
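The selection logged at 07:32:23,564-566 picks all three eligible store files (12.2 K + 29.1 K + 12.2 K, 54779 bytes total) for a minor compaction because no single file is disproportionately larger than the rest. The following is a minimal sketch of that kind of ratio check, not the actual ExploringCompactionPolicy code; the 1.2 ratio and the class name are assumed for illustration only.

    import java.util.List;

    // Simplified illustration of a ratio-based compaction selection check,
    // in the spirit of the exploring policy seen in the log (not HBase's code).
    final class CompactionSelectionSketch {

        // A candidate set is "in ratio" if no file is much larger than the
        // combined size of the other candidates.
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes taken from the log: 12509 + 29761 + 12509 = 54779 bytes.
            List<Long> candidates = List.of(12509L, 29761L, 12509L);
            double assumedRatio = 1.2; // hypothetical ratio, not read from the log
            System.out.println("select all 3 files: " + filesInRatio(candidates, assumedRatio));
        }
    }

With these sizes the check passes for every file, which matches the "1 in ratio" selection of all three candidates reported above.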
2024-11-25T07:32:23,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741840_1016 (size=20064) 2024-11-25T07:32:23,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741840_1016 (size=20064) 2024-11-25T07:32:23,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/56bf4e7c036a4393a1fdc56411a4921f 2024-11-25T07:32:23,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/56bf4e7c036a4393a1fdc56411a4921f as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/56bf4e7c036a4393a1fdc56411a4921f 2024-11-25T07:32:23,580 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 991144de426cafcdf6a39be16db812fb#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:32:23,580 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/b9d4745774454433b10a07b2f09e22e3 is 1080, key is row0001/info:/1732519931443/Put/seqid=0 2024-11-25T07:32:23,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/56bf4e7c036a4393a1fdc56411a4921f, entries=14, sequenceid=64, filesize=19.6 K 2024-11-25T07:32:23,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 991144de426cafcdf6a39be16db812fb in 21ms, sequenceid=64, compaction requested=false 2024-11-25T07:32:23,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 991144de426cafcdf6a39be16db812fb: 2024-11-25T07:32:23,585 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.1 K, sizeToCheck=16.0 K 2024-11-25T07:32:23,585 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:32:23,585 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/42bacba6a9924f7c9be58568e087e365 because midkey is the same as first or last row 2024-11-25T07:32:23,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741841_1017 (size=44978) 2024-11-25T07:32:23,585 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:23,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 991144de426cafcdf6a39be16db812fb 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-25T07:32:23,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741841_1017 (size=44978) 2024-11-25T07:32:23,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/b2d085ede4044fa9a6994460ccdd679d is 1080, key is row0052/info:/1732519943565/Put/seqid=0 2024-11-25T07:32:23,591 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/b9d4745774454433b10a07b2f09e22e3 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b9d4745774454433b10a07b2f09e22e3 2024-11-25T07:32:23,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741842_1018 (size=17894) 2024-11-25T07:32:23,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741842_1018 (size=17894) 2024-11-25T07:32:23,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/b2d085ede4044fa9a6994460ccdd679d 2024-11-25T07:32:23,597 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 991144de426cafcdf6a39be16db812fb/info of 991144de426cafcdf6a39be16db812fb into b9d4745774454433b10a07b2f09e22e3(size=43.9 K), total size for store is 63.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
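The HRegionFileSystem(442) entries above show the flush commit pattern: each new HFile is first written under the region's .tmp directory and only afterwards moved into the live info/ store directory. Below is a minimal sketch of that write-then-rename pattern using the plain Hadoop FileSystem API; the namenode address mirrors the log, but the paths, file names and class are illustrative and this is not HBase's HRegionFileSystem implementation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of the "write to .tmp, then commit" pattern visible in the log.
    public class TmpThenCommitSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Hypothetical cluster and layout, loosely mirroring the test paths.
            FileSystem fs = FileSystem.get(new Path("hdfs://localhost:37267/").toUri(), conf);
            Path tmpFile = new Path("/data/default/SomeTable/region/.tmp/info/flushfile");
            Path finalFile = new Path("/data/default/SomeTable/region/info/flushfile");

            // The flusher first writes the complete file under .tmp ...
            try (var out = fs.create(tmpFile, true)) {
                out.writeBytes("hfile contents would go here");
            }
            // ... and only then moves it into the live store directory, so readers
            // never observe a partially written store file.
            boolean committed = fs.rename(tmpFile, finalFile);
            System.out.println("committed=" + committed);
        }
    }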
2024-11-25T07:32:23,597 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 991144de426cafcdf6a39be16db812fb: 2024-11-25T07:32:23,597 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., storeName=991144de426cafcdf6a39be16db812fb/info, priority=13, startTime=1732519943563; duration=0sec 2024-11-25T07:32:23,597 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-25T07:32:23,598 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:32:23,598 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b9d4745774454433b10a07b2f09e22e3 because midkey is the same as first or last row 2024-11-25T07:32:23,598 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-25T07:32:23,598 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:32:23,598 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b9d4745774454433b10a07b2f09e22e3 because midkey is the same as first or last row 2024-11-25T07:32:23,598 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-25T07:32:23,598 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:32:23,598 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b9d4745774454433b10a07b2f09e22e3 because midkey is the same as first or last row 2024-11-25T07:32:23,598 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:23,598 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 991144de426cafcdf6a39be16db812fb:info 2024-11-25T07:32:23,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/b2d085ede4044fa9a6994460ccdd679d as 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b2d085ede4044fa9a6994460ccdd679d 2024-11-25T07:32:23,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b2d085ede4044fa9a6994460ccdd679d, entries=12, sequenceid=79, filesize=17.5 K 2024-11-25T07:32:23,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 991144de426cafcdf6a39be16db812fb in 19ms, sequenceid=79, compaction requested=true 2024-11-25T07:32:23,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 991144de426cafcdf6a39be16db812fb: 2024-11-25T07:32:23,605 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.0 K, sizeToCheck=16.0 K 2024-11-25T07:32:23,605 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:32:23,605 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b9d4745774454433b10a07b2f09e22e3 because midkey is the same as first or last row 2024-11-25T07:32:23,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 991144de426cafcdf6a39be16db812fb:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T07:32:23,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:23,605 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T07:32:23,606 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 82936 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T07:32:23,606 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1541): 991144de426cafcdf6a39be16db812fb/info is initiating minor compaction (all files) 2024-11-25T07:32:23,606 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 991144de426cafcdf6a39be16db812fb/info in TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. 
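The ConstantSizeRegionSplitPolicy and StoreUtils lines above show the two-part split decision: the region becomes split-eligible once the total store size exceeds sizeToCheck (16.0 K in this test configuration), but the split is refused while the store's midkey equals the first or last row, because that would leave one daughter empty. A simplified sketch of those two checks follows; the class name and key values are hypothetical and this is not the HBase policy code.

    // Simplified illustration of the split decision seen in the log.
    final class SplitCheckSketch {

        static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
            return sumStoreSizeBytes > sizeToCheckBytes;
        }

        static boolean canUseMidkey(byte[] midkey, byte[] firstKey, byte[] lastKey) {
            // A midkey equal to either boundary would produce an empty daughter region.
            return !java.util.Arrays.equals(midkey, firstKey)
                && !java.util.Arrays.equals(midkey, lastKey);
        }

        public static void main(String[] args) {
            long sumSize = 81 * 1024;      // ~81.0 K, as in the log
            long sizeToCheck = 16 * 1024;  // 16.0 K, as in the log
            System.out.println("size says split: " + shouldSplit(sumSize, sizeToCheck));
            // Example keys only: a midkey equal to the first key blocks the split.
            System.out.println("midkey usable: "
                + canUseMidkey("row0001".getBytes(), "row0001".getBytes(), "row0085".getBytes()));
        }
    }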
2024-11-25T07:32:23,606 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b9d4745774454433b10a07b2f09e22e3, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/56bf4e7c036a4393a1fdc56411a4921f, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b2d085ede4044fa9a6994460ccdd679d] into tmpdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp, totalSize=81.0 K 2024-11-25T07:32:23,606 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting b9d4745774454433b10a07b2f09e22e3, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732519931443 2024-11-25T07:32:23,607 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 56bf4e7c036a4393a1fdc56411a4921f, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1732519943542 2024-11-25T07:32:23,607 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting b2d085ede4044fa9a6994460ccdd679d, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732519943565 2024-11-25T07:32:23,617 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 991144de426cafcdf6a39be16db812fb#info#compaction#61 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:32:23,617 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/89d372ae1d234ef8b4d3ffeb2cf8a443 is 1080, key is row0001/info:/1732519931443/Put/seqid=0 2024-11-25T07:32:23,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741843_1019 (size=73224) 2024-11-25T07:32:23,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741843_1019 (size=73224) 2024-11-25T07:32:23,627 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/89d372ae1d234ef8b4d3ffeb2cf8a443 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/89d372ae1d234ef8b4d3ffeb2cf8a443 2024-11-25T07:32:23,633 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 991144de426cafcdf6a39be16db812fb/info of 991144de426cafcdf6a39be16db812fb into 89d372ae1d234ef8b4d3ffeb2cf8a443(size=71.5 K), total size for store is 71.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T07:32:23,633 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 991144de426cafcdf6a39be16db812fb: 2024-11-25T07:32:23,633 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., storeName=991144de426cafcdf6a39be16db812fb/info, priority=13, startTime=1732519943605; duration=0sec 2024-11-25T07:32:23,633 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-25T07:32:23,633 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:32:23,634 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-25T07:32:23,634 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:32:23,634 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-25T07:32:23,634 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T07:32:23,635 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., 

compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T07:32:23,635 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T07:32:23,635 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 991144de426cafcdf6a39be16db812fb:info
2024-11-25T07:32:23,636 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41919 {}] assignment.AssignmentManager(1363): Split request from 5eb3d201e8c9,33373,1732519920437, parent={ENCODED => 991144de426cafcdf6a39be16db812fb, NAME => 'TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062
2024-11-25T07:32:23,641 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41919 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=5eb3d201e8c9,33373,1732519920437
2024-11-25T07:32:23,644 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41919 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=991144de426cafcdf6a39be16db812fb, daughterA=5519f0cd9d3b7b90490198126cbbdc73, daughterB=30767e2e8b52b777e66aa2db4c7fcb90
2024-11-25T07:32:23,645 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=991144de426cafcdf6a39be16db812fb, daughterA=5519f0cd9d3b7b90490198126cbbdc73, daughterB=30767e2e8b52b777e66aa2db4c7fcb90
2024-11-25T07:32:23,645 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=991144de426cafcdf6a39be16db812fb, daughterA=5519f0cd9d3b7b90490198126cbbdc73, daughterB=30767e2e8b52b777e66aa2db4c7fcb90
2024-11-25T07:32:23,645 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=991144de426cafcdf6a39be16db812fb, daughterA=5519f0cd9d3b7b90490198126cbbdc73, daughterB=30767e2e8b52b777e66aa2db4c7fcb90
2024-11-25T07:32:23,652 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=991144de426cafcdf6a39be16db812fb, UNASSIGN}]
2024-11-25T07:32:23,653 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=991144de426cafcdf6a39be16db812fb, UNASSIGN
2024-11-25T07:32:23,654 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=991144de426cafcdf6a39be16db812fb, regionState=CLOSING, regionLocation=5eb3d201e8c9,33373,1732519920437
2024-11-25T07:32:23,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=991144de426cafcdf6a39be16db812fb, UNASSIGN because future has completed
2024-11-25T07:32:23,657 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-11-25T07:32:23,657 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 991144de426cafcdf6a39be16db812fb, server=5eb3d201e8c9,33373,1732519920437}]
2024-11-25T07:32:23,813 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 991144de426cafcdf6a39be16db812fb
2024-11-25T07:32:23,814 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true
2024-11-25T07:32:23,814 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 991144de426cafcdf6a39be16db812fb, disabling compactions & flushes
2024-11-25T07:32:23,814 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.
2024-11-25T07:32:23,814 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.
2024-11-25T07:32:23,814 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. after waiting 0 ms
2024-11-25T07:32:23,814 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.
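The split request above carries the parent key range STARTKEY='' / ENDKEY='' and splitKey=row0062, which fully determines the two daughters' key ranges. A small sketch of that derivation follows; the class and record names are illustrative only and this is not the SplitTableRegionProcedure code.

    // Sketch: daughterA takes ['', 'row0062') and daughterB takes ['row0062', ''),
    // matching the split request logged above.
    final class DaughterRangesSketch {

        record KeyRange(String startKey, String endKey) {}

        static KeyRange[] split(KeyRange parent, String splitKey) {
            return new KeyRange[] {
                new KeyRange(parent.startKey(), splitKey),  // daughterA
                new KeyRange(splitKey, parent.endKey())     // daughterB
            };
        }

        public static void main(String[] args) {
            KeyRange parent = new KeyRange("", "");  // STARTKEY='' , ENDKEY=''
            KeyRange[] daughters = split(parent, "row0062");
            System.out.println("daughterA: [" + daughters[0].startKey() + ", " + daughters[0].endKey() + ")");
            System.out.println("daughterB: [" + daughters[1].startKey() + ", " + daughters[1].endKey() + ")");
        }
    }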
2024-11-25T07:32:23,814 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 991144de426cafcdf6a39be16db812fb 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T07:32:23,818 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/abc0825106b344a1a7be8495fa3aeee6 is 1080, key is row0064/info:/1732519943587/Put/seqid=0 2024-11-25T07:32:23,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741844_1020 (size=6033) 2024-11-25T07:32:23,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741844_1020 (size=6033) 2024-11-25T07:32:23,824 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/abc0825106b344a1a7be8495fa3aeee6 2024-11-25T07:32:23,829 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/.tmp/info/abc0825106b344a1a7be8495fa3aeee6 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/abc0825106b344a1a7be8495fa3aeee6 2024-11-25T07:32:23,834 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/abc0825106b344a1a7be8495fa3aeee6, entries=1, sequenceid=85, filesize=5.9 K 2024-11-25T07:32:23,835 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 991144de426cafcdf6a39be16db812fb in 21ms, sequenceid=85, compaction requested=false 2024-11-25T07:32:23,836 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/36beda8ccb6c4fe5a9f0499568682d9c, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/42bacba6a9924f7c9be58568e087e365, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b9d4745774454433b10a07b2f09e22e3, 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/364c90bc524f49b689f1d20521c90b03, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/56bf4e7c036a4393a1fdc56411a4921f, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b2d085ede4044fa9a6994460ccdd679d] to archive 2024-11-25T07:32:23,837 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T07:32:23,838 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/36beda8ccb6c4fe5a9f0499568682d9c to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/36beda8ccb6c4fe5a9f0499568682d9c 2024-11-25T07:32:23,839 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/42bacba6a9924f7c9be58568e087e365 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/42bacba6a9924f7c9be58568e087e365 2024-11-25T07:32:23,840 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b9d4745774454433b10a07b2f09e22e3 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b9d4745774454433b10a07b2f09e22e3 2024-11-25T07:32:23,841 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/364c90bc524f49b689f1d20521c90b03 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/364c90bc524f49b689f1d20521c90b03 2024-11-25T07:32:23,842 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/56bf4e7c036a4393a1fdc56411a4921f to 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/56bf4e7c036a4393a1fdc56411a4921f 2024-11-25T07:32:23,843 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b2d085ede4044fa9a6994460ccdd679d to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/b2d085ede4044fa9a6994460ccdd679d 2024-11-25T07:32:23,848 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=1 2024-11-25T07:32:23,849 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. 2024-11-25T07:32:23,849 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 991144de426cafcdf6a39be16db812fb: Waiting for close lock at 1732519943814Running coprocessor pre-close hooks at 1732519943814Disabling compacts and flushes for region at 1732519943814Disabling writes for close at 1732519943814Obtaining lock to block concurrent updates at 1732519943814Preparing flush snapshotting stores in 991144de426cafcdf6a39be16db812fb at 1732519943814Finished memstore snapshotting TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732519943815 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. 
at 1732519943815Flushing 991144de426cafcdf6a39be16db812fb/info: creating writer at 1732519943815Flushing 991144de426cafcdf6a39be16db812fb/info: appending metadata at 1732519943818 (+3 ms)Flushing 991144de426cafcdf6a39be16db812fb/info: closing flushed file at 1732519943818Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@692eef50: reopening flushed file at 1732519943829 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 991144de426cafcdf6a39be16db812fb in 21ms, sequenceid=85, compaction requested=false at 1732519943835 (+6 ms)Writing region close event to WAL at 1732519943846 (+11 ms)Running coprocessor post-close hooks at 1732519943849 (+3 ms)Closed at 1732519943849 2024-11-25T07:32:23,851 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:23,852 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=991144de426cafcdf6a39be16db812fb, regionState=CLOSED 2024-11-25T07:32:23,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 991144de426cafcdf6a39be16db812fb, server=5eb3d201e8c9,33373,1732519920437 because future has completed 2024-11-25T07:32:23,857 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-25T07:32:23,857 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 991144de426cafcdf6a39be16db812fb, server=5eb3d201e8c9,33373,1732519920437 in 198 msec 2024-11-25T07:32:23,859 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-25T07:32:23,859 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=991144de426cafcdf6a39be16db812fb, UNASSIGN in 205 msec 2024-11-25T07:32:23,865 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:23,868 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=991144de426cafcdf6a39be16db812fb, threads=2 2024-11-25T07:32:23,870 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/abc0825106b344a1a7be8495fa3aeee6 for region: 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:23,870 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/89d372ae1d234ef8b4d3ffeb2cf8a443 for region: 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:23,877 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/abc0825106b344a1a7be8495fa3aeee6, top=true 2024-11-25T07:32:23,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741845_1021 (size=27) 2024-11-25T07:32:23,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741845_1021 (size=27) 2024-11-25T07:32:23,883 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/TestLogRolling-testLogRolling=991144de426cafcdf6a39be16db812fb-abc0825106b344a1a7be8495fa3aeee6 for child: 30767e2e8b52b777e66aa2db4c7fcb90, parent: 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:23,883 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/abc0825106b344a1a7be8495fa3aeee6 for region: 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:23,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741846_1022 (size=27) 2024-11-25T07:32:23,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741846_1022 (size=27) 2024-11-25T07:32:23,891 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/89d372ae1d234ef8b4d3ffeb2cf8a443 for region: 991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:23,893 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 991144de426cafcdf6a39be16db812fb Daughter A: [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb] storefiles, Daughter B: [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/TestLogRolling-testLogRolling=991144de426cafcdf6a39be16db812fb-abc0825106b344a1a7be8495fa3aeee6] storefiles. 2024-11-25T07:32:23,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:23,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741847_1023 (size=71) 2024-11-25T07:32:23,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741847_1023 (size=71) 2024-11-25T07:32:23,901 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:23,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:23,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741848_1024 (size=71) 2024-11-25T07:32:23,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741848_1024 (size=71) 2024-11-25T07:32:23,912 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:23,919 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-25T07:32:23,921 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-25T07:32:23,923 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732519943922"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732519943922"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732519943922"}]},"ts":"1732519943922"} 2024-11-25T07:32:23,923 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732519943922"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732519943922"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732519943922"}]},"ts":"1732519943922"} 2024-11-25T07:32:23,923 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732519943922"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732519943922"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732519943922"}]},"ts":"1732519943922"} 2024-11-25T07:32:23,939 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5519f0cd9d3b7b90490198126cbbdc73, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=30767e2e8b52b777e66aa2db4c7fcb90, ASSIGN}] 2024-11-25T07:32:23,941 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5519f0cd9d3b7b90490198126cbbdc73, ASSIGN 2024-11-25T07:32:23,941 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=30767e2e8b52b777e66aa2db4c7fcb90, ASSIGN 2024-11-25T07:32:23,941 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5519f0cd9d3b7b90490198126cbbdc73, ASSIGN; state=SPLITTING_NEW, location=5eb3d201e8c9,33373,1732519920437; forceNewPlan=false, retain=false 2024-11-25T07:32:23,942 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=30767e2e8b52b777e66aa2db4c7fcb90, ASSIGN; state=SPLITTING_NEW, location=5eb3d201e8c9,33373,1732519920437; forceNewPlan=false, retain=false 2024-11-25T07:32:24,092 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=30767e2e8b52b777e66aa2db4c7fcb90, regionState=OPENING, regionLocation=5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:24,092 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=5519f0cd9d3b7b90490198126cbbdc73, regionState=OPENING, regionLocation=5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:24,094 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=30767e2e8b52b777e66aa2db4c7fcb90, ASSIGN because future has 
completed 2024-11-25T07:32:24,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 30767e2e8b52b777e66aa2db4c7fcb90, server=5eb3d201e8c9,33373,1732519920437}] 2024-11-25T07:32:24,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5519f0cd9d3b7b90490198126cbbdc73, ASSIGN because future has completed 2024-11-25T07:32:24,096 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5519f0cd9d3b7b90490198126cbbdc73, server=5eb3d201e8c9,33373,1732519920437}] 2024-11-25T07:32:24,250 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:24,251 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 30767e2e8b52b777e66aa2db4c7fcb90, NAME => 'TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-25T07:32:24,251 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:24,251 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:32:24,251 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:24,251 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:24,253 INFO [StoreOpener-30767e2e8b52b777e66aa2db4c7fcb90-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:24,253 INFO [StoreOpener-30767e2e8b52b777e66aa2db4c7fcb90-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 30767e2e8b52b777e66aa2db4c7fcb90 columnFamilyName info 2024-11-25T07:32:24,253 DEBUG [StoreOpener-30767e2e8b52b777e66aa2db4c7fcb90-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:24,262 DEBUG [StoreOpener-30767e2e8b52b777e66aa2db4c7fcb90-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb->hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/89d372ae1d234ef8b4d3ffeb2cf8a443-top 2024-11-25T07:32:24,266 DEBUG [StoreOpener-30767e2e8b52b777e66aa2db4c7fcb90-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/TestLogRolling-testLogRolling=991144de426cafcdf6a39be16db812fb-abc0825106b344a1a7be8495fa3aeee6 2024-11-25T07:32:24,266 INFO [StoreOpener-30767e2e8b52b777e66aa2db4c7fcb90-1 {}] regionserver.HStore(327): Store=30767e2e8b52b777e66aa2db4c7fcb90/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:32:24,266 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:24,267 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:24,268 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:24,268 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:24,268 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:24,270 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:24,271 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 30767e2e8b52b777e66aa2db4c7fcb90; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848721, jitterRate=0.07920484244823456}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T07:32:24,271 DEBUG 
[RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:24,271 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 30767e2e8b52b777e66aa2db4c7fcb90: Running coprocessor pre-open hook at 1732519944251Writing region info on filesystem at 1732519944251Initializing all the Stores at 1732519944252 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519944252Cleaning up temporary data from old regions at 1732519944268 (+16 ms)Running coprocessor post-open hooks at 1732519944271 (+3 ms)Region opened successfully at 1732519944271 2024-11-25T07:32:24,272 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., pid=12, masterSystemTime=1732519944247 2024-11-25T07:32:24,272 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 30767e2e8b52b777e66aa2db4c7fcb90:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T07:32:24,272 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:24,272 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-25T07:32:24,274 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:24,274 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1541): 30767e2e8b52b777e66aa2db4c7fcb90/info is initiating minor compaction (all files) 2024-11-25T07:32:24,274 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 30767e2e8b52b777e66aa2db4c7fcb90/info in TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 
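
The Close-WAL-Writer warnings above (java.lang.reflect.InvocationTargetException caused by "Filesystem closed") come from lease recovery on WALs of an earlier, already-shut-down mini cluster: RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed reflectively, and the call fails once the underlying DFSClient has been closed. A simplified, hedged sketch of that probe (illustrative names, not copied from the HBase source):

    // Simplified illustration of the isFileClosed probe; not the HBase implementation.
    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class IsFileClosedProbe {
      /** Best-effort check whether a WAL file's lease is already released. */
      static boolean isFileClosed(FileSystem fs, Path wal) {
        if (!(fs instanceof DistributedFileSystem)) {
          return false; // only HDFS exposes isFileClosed
        }
        try {
          return ((DistributedFileSystem) fs).isFileClosed(wal);
        } catch (IOException e) {
          // "Filesystem closed" lands here when the DFSClient has already been shut down,
          // which is what the repeated WARN lines in this log show.
          return false;
        }
      }
    }
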
2024-11-25T07:32:24,275 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb->hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/89d372ae1d234ef8b4d3ffeb2cf8a443-top, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/TestLogRolling-testLogRolling=991144de426cafcdf6a39be16db812fb-abc0825106b344a1a7be8495fa3aeee6] into tmpdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp, totalSize=77.4 K 2024-11-25T07:32:24,275 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:24,275 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:24,275 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73. 2024-11-25T07:32:24,275 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 5519f0cd9d3b7b90490198126cbbdc73, NAME => 'TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-25T07:32:24,275 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732519931443 2024-11-25T07:32:24,275 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:24,276 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:32:24,276 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:24,276 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:24,276 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 
TestLogRolling-testLogRolling=991144de426cafcdf6a39be16db812fb-abc0825106b344a1a7be8495fa3aeee6, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1732519943587 2024-11-25T07:32:24,276 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=30767e2e8b52b777e66aa2db4c7fcb90, regionState=OPEN, openSeqNum=89, regionLocation=5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:24,277 INFO [StoreOpener-5519f0cd9d3b7b90490198126cbbdc73-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:24,278 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-25T07:32:24,278 INFO [StoreOpener-5519f0cd9d3b7b90490198126cbbdc73-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5519f0cd9d3b7b90490198126cbbdc73 columnFamilyName info 2024-11-25T07:32:24,278 DEBUG [StoreOpener-5519f0cd9d3b7b90490198126cbbdc73-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:24,279 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
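
The FlushAllLargeStoresPolicy message above reflects its selection rule for hbase:meta (region 1588230740): flush only the column families whose memstore exceeds the configured lower bound, and fall back to flushing every family when none qualifies, which is why the next entry reports 4/4 column families. A hedged, simplified rendering of that rule (illustrative only, not the actual HBase class):

    // Simplified illustration of the "flush all if none are large" rule; not the HBase source.
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    final class FlushSelectionSketch {
      /** Returns the families to flush: the "large" ones, or all of them if none is large. */
      static List<String> selectFamiliesToFlush(Map<String, Long> memstoreSizeByFamily,
                                                long flushSizeLowerBound) {
        List<String> large = new ArrayList<>();
        for (Map.Entry<String, Long> e : memstoreSizeByFamily.entrySet()) {
          if (e.getValue() >= flushSizeLowerBound) {
            large.add(e.getKey());
          }
        }
        // "Since none of the CFs were above the size, flushing all."
        return large.isEmpty() ? new ArrayList<>(memstoreSizeByFamily.keySet()) : large;
      }
    }
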
2024-11-25T07:32:24,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-25T07:32:24,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 30767e2e8b52b777e66aa2db4c7fcb90, server=5eb3d201e8c9,33373,1732519920437 because future has completed 2024-11-25T07:32:24,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-25T07:32:24,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 30767e2e8b52b777e66aa2db4c7fcb90, server=5eb3d201e8c9,33373,1732519920437 in 186 msec 2024-11-25T07:32:24,287 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=30767e2e8b52b777e66aa2db4c7fcb90, ASSIGN in 347 msec 2024-11-25T07:32:24,289 DEBUG [StoreOpener-5519f0cd9d3b7b90490198126cbbdc73-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb->hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/89d372ae1d234ef8b4d3ffeb2cf8a443-bottom 2024-11-25T07:32:24,289 INFO [StoreOpener-5519f0cd9d3b7b90490198126cbbdc73-1 {}] regionserver.HStore(327): Store=5519f0cd9d3b7b90490198126cbbdc73/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:32:24,290 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:24,293 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:24,294 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:24,294 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:24,294 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:24,296 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:24,297 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.HRegion(1114): Opened 5519f0cd9d3b7b90490198126cbbdc73; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716098, jitterRate=-0.08943545818328857}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T07:32:24,297 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:24,297 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 5519f0cd9d3b7b90490198126cbbdc73: Running coprocessor pre-open hook at 1732519944276Writing region info on filesystem at 1732519944276Initializing all the Stores at 1732519944277 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519944277Cleaning up temporary data from old regions at 1732519944294 (+17 ms)Running coprocessor post-open hooks at 1732519944297 (+3 ms)Region opened successfully at 1732519944297 2024-11-25T07:32:24,297 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73., pid=13, masterSystemTime=1732519944247 2024-11-25T07:32:24,298 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 5519f0cd9d3b7b90490198126cbbdc73:info, priority=-2147483648, current under compaction store size is 2 2024-11-25T07:32:24,298 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:24,298 DEBUG [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-25T07:32:24,298 INFO [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73. 2024-11-25T07:32:24,298 DEBUG [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] regionserver.HStore(1541): 5519f0cd9d3b7b90490198126cbbdc73/info is initiating minor compaction (all files) 2024-11-25T07:32:24,298 INFO [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5519f0cd9d3b7b90490198126cbbdc73/info in TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73. 
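
At this point both daughters are open: 5519f0cd9d3b7b90490198126cbbdc73 covering ['', 'row0062') and 30767e2e8b52b777e66aa2db4c7fcb90 covering ['row0062', ''). As a hedged sketch (not part of the test), the resulting region boundaries could be listed with a RegionLocator to confirm the split:

    // Hedged sketch: listing post-split region boundaries; not the test's code.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ListRegionsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("TestLogRolling-testLogRolling"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName()
                + " [" + Bytes.toStringBinary(loc.getRegion().getStartKey())
                + ", " + Bytes.toStringBinary(loc.getRegion().getEndKey()) + ")");
          }
        }
      }
    }
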
2024-11-25T07:32:24,298 INFO [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb->hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/89d372ae1d234ef8b4d3ffeb2cf8a443-bottom] into tmpdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/.tmp, totalSize=71.5 K 2024-11-25T07:32:24,299 DEBUG [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] compactions.Compactor(225): Compacting 89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732519931443 2024-11-25T07:32:24,300 DEBUG [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73. 2024-11-25T07:32:24,300 INFO [RS_OPEN_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73. 2024-11-25T07:32:24,300 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=5519f0cd9d3b7b90490198126cbbdc73, regionState=OPEN, openSeqNum=89, regionLocation=5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:24,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5519f0cd9d3b7b90490198126cbbdc73, server=5eb3d201e8c9,33373,1732519920437 because future has completed 2024-11-25T07:32:24,303 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 30767e2e8b52b777e66aa2db4c7fcb90#info#compaction#64 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:32:24,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/info/a36766bfa3f64b5abe2e50e16a2f35dd is 193, key is TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90./info:regioninfo/1732519944276/Put/seqid=0 2024-11-25T07:32:24,303 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/ea39497feca54492b0b752b6c2b60d5c is 1080, key is row0062/info:/1732519943583/Put/seqid=0 2024-11-25T07:32:24,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-25T07:32:24,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 5519f0cd9d3b7b90490198126cbbdc73, server=5eb3d201e8c9,33373,1732519920437 in 208 msec 2024-11-25T07:32:24,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-25T07:32:24,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5519f0cd9d3b7b90490198126cbbdc73, ASSIGN in 368 msec 2024-11-25T07:32:24,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=991144de426cafcdf6a39be16db812fb, daughterA=5519f0cd9d3b7b90490198126cbbdc73, daughterB=30767e2e8b52b777e66aa2db4c7fcb90 in 668 msec 2024-11-25T07:32:24,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741849_1025 (size=8359) 2024-11-25T07:32:24,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741849_1025 (size=8359) 2024-11-25T07:32:24,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741850_1026 (size=9882) 2024-11-25T07:32:24,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741850_1026 (size=9882) 2024-11-25T07:32:24,319 INFO [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5519f0cd9d3b7b90490198126cbbdc73#info#compaction#65 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:32:24,320 DEBUG [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/.tmp/info/8a7ad9d9afd24e138720f3d33be08de8 is 1080, key is row0001/info:/1732519931443/Put/seqid=0 2024-11-25T07:32:24,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741851_1027 (size=70862) 2024-11-25T07:32:24,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741851_1027 (size=70862) 2024-11-25T07:32:24,326 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/ea39497feca54492b0b752b6c2b60d5c as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/ea39497feca54492b0b752b6c2b60d5c 2024-11-25T07:32:24,329 DEBUG [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/.tmp/info/8a7ad9d9afd24e138720f3d33be08de8 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/info/8a7ad9d9afd24e138720f3d33be08de8 2024-11-25T07:32:24,332 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in 30767e2e8b52b777e66aa2db4c7fcb90/info of 30767e2e8b52b777e66aa2db4c7fcb90 into ea39497feca54492b0b752b6c2b60d5c(size=8.2 K), total size for store is 8.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T07:32:24,332 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:24,333 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., storeName=30767e2e8b52b777e66aa2db4c7fcb90/info, priority=14, startTime=1732519944272; duration=0sec 2024-11-25T07:32:24,333 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:24,333 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 30767e2e8b52b777e66aa2db4c7fcb90:info 2024-11-25T07:32:24,334 INFO [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 5519f0cd9d3b7b90490198126cbbdc73/info of 5519f0cd9d3b7b90490198126cbbdc73 into 8a7ad9d9afd24e138720f3d33be08de8(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
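
The two compactions above rewrite each daughter's split reference/HFileLink files into self-contained HFiles, after which the parent's store files can eventually be archived. A hedged sketch (not part of the test) of requesting such a rewrite explicitly through the Admin API:

    // Hedged sketch: explicitly requesting a major compaction so daughter regions
    // rewrite their reference files into standalone HFiles; not the test's code.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MajorCompactSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asynchronous request; the region servers log "Completed compaction ..." lines
          // like the ones above once the rewrite finishes.
          admin.majorCompact(TableName.valueOf("TestLogRolling-testLogRolling"));
        }
      }
    }
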
2024-11-25T07:32:24,334 DEBUG [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5519f0cd9d3b7b90490198126cbbdc73: 2024-11-25T07:32:24,334 INFO [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73., storeName=5519f0cd9d3b7b90490198126cbbdc73/info, priority=15, startTime=1732519944298; duration=0sec 2024-11-25T07:32:24,334 DEBUG [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:24,334 DEBUG [RS:0;5eb3d201e8c9:33373-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5519f0cd9d3b7b90490198126cbbdc73:info 2024-11-25T07:32:24,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/info/a36766bfa3f64b5abe2e50e16a2f35dd 2024-11-25T07:32:24,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/ns/75e800ecce8e49658be48fe388fb59b2 is 43, key is default/ns:d/1732519921262/Put/seqid=0 2024-11-25T07:32:24,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741852_1028 (size=5153) 2024-11-25T07:32:24,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741852_1028 (size=5153) 2024-11-25T07:32:24,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/ns/75e800ecce8e49658be48fe388fb59b2 2024-11-25T07:32:24,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/table/b63038696d70409fa9144d824664592c is 65, key is TestLogRolling-testLogRolling/table:state/1732519921730/Put/seqid=0 2024-11-25T07:32:24,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741853_1029 (size=5340) 2024-11-25T07:32:24,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741853_1029 (size=5340) 2024-11-25T07:32:24,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/table/b63038696d70409fa9144d824664592c 2024-11-25T07:32:24,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/info/a36766bfa3f64b5abe2e50e16a2f35dd as 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/info/a36766bfa3f64b5abe2e50e16a2f35dd 2024-11-25T07:32:24,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/info/a36766bfa3f64b5abe2e50e16a2f35dd, entries=30, sequenceid=17, filesize=9.7 K 2024-11-25T07:32:24,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/ns/75e800ecce8e49658be48fe388fb59b2 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/ns/75e800ecce8e49658be48fe388fb59b2 2024-11-25T07:32:24,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/ns/75e800ecce8e49658be48fe388fb59b2, entries=2, sequenceid=17, filesize=5.0 K 2024-11-25T07:32:24,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/table/b63038696d70409fa9144d824664592c as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/table/b63038696d70409fa9144d824664592c 2024-11-25T07:32:24,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/table/b63038696d70409fa9144d824664592c, entries=2, sequenceid=17, filesize=5.2 K 2024-11-25T07:32:24,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 505ms, sequenceid=17, compaction requested=false 2024-11-25T07:32:24,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-25T07:32:24,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:24,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:25,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56934 deadline: 1732519955589, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. is not online on 5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:25,591 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. is not online on 5eb3d201e8c9,33373,1732519920437 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T07:32:25,591 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb. 
is not online on 5eb3d201e8c9,33373,1732519920437 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T07:32:25,591 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732519921370.991144de426cafcdf6a39be16db812fb., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=2 from cache 2024-11-25T07:32:25,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:25,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:26,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:26,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:27,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:27,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:28,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,874 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,874 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,874 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:28,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:28,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:29,381 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T07:32:29,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T07:32:29,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:29,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:30,374 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T07:32:30,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:30,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:31,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:31,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:32,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:32,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:33,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:33,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:34,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:34,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:35,704 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=89] 2024-11-25T07:32:35,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:35,715 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T07:32:35,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/f5ab5672a4f14519b0631135a283a640 is 1080, key is row0065/info:/1732519955705/Put/seqid=0 2024-11-25T07:32:35,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741854_1030 (size=12509) 2024-11-25T07:32:35,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741854_1030 (size=12509) 2024-11-25T07:32:35,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/f5ab5672a4f14519b0631135a283a640 2024-11-25T07:32:35,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/f5ab5672a4f14519b0631135a283a640 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/f5ab5672a4f14519b0631135a283a640 2024-11-25T07:32:35,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/f5ab5672a4f14519b0631135a283a640, entries=7, sequenceid=99, filesize=12.2 K 2024-11-25T07:32:35,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 30767e2e8b52b777e66aa2db4c7fcb90 in 21ms, sequenceid=99, compaction requested=false 2024-11-25T07:32:35,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:35,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:35,738 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-25T07:32:35,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/6641d065c32d4209959262ef79e0fd2c is 1080, key is row0072/info:/1732519955715/Put/seqid=0 2024-11-25T07:32:35,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741855_1031 (size=21141) 2024-11-25T07:32:35,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741855_1031 (size=21141) 2024-11-25T07:32:35,747 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/6641d065c32d4209959262ef79e0fd2c 2024-11-25T07:32:35,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/6641d065c32d4209959262ef79e0fd2c as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/6641d065c32d4209959262ef79e0fd2c 2024-11-25T07:32:35,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/6641d065c32d4209959262ef79e0fd2c, entries=15, sequenceid=117, filesize=20.6 K 2024-11-25T07:32:35,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for 30767e2e8b52b777e66aa2db4c7fcb90 in 19ms, sequenceid=117, compaction requested=true 2024-11-25T07:32:35,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:35,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 30767e2e8b52b777e66aa2db4c7fcb90:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T07:32:35,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:35,758 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T07:32:35,759 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42009 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T07:32:35,759 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1541): 30767e2e8b52b777e66aa2db4c7fcb90/info is initiating minor compaction (all files) 2024-11-25T07:32:35,759 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 30767e2e8b52b777e66aa2db4c7fcb90/info in 
TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:35,759 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/ea39497feca54492b0b752b6c2b60d5c, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/f5ab5672a4f14519b0631135a283a640, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/6641d065c32d4209959262ef79e0fd2c] into tmpdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp, totalSize=41.0 K 2024-11-25T07:32:35,759 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting ea39497feca54492b0b752b6c2b60d5c, keycount=3, bloomtype=ROW, size=8.2 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1732519943583 2024-11-25T07:32:35,759 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting f5ab5672a4f14519b0631135a283a640, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1732519955705 2024-11-25T07:32:35,760 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6641d065c32d4209959262ef79e0fd2c, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732519955715 2024-11-25T07:32:35,769 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 30767e2e8b52b777e66aa2db4c7fcb90#info#compaction#70 average throughput is 25.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:32:35,770 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/aef872fd2c9c4651a21d514fe15d5349 is 1080, key is row0062/info:/1732519943583/Put/seqid=0 2024-11-25T07:32:35,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741856_1032 (size=32183) 2024-11-25T07:32:35,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741856_1032 (size=32183) 2024-11-25T07:32:35,780 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/aef872fd2c9c4651a21d514fe15d5349 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/aef872fd2c9c4651a21d514fe15d5349 2024-11-25T07:32:35,786 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 30767e2e8b52b777e66aa2db4c7fcb90/info of 30767e2e8b52b777e66aa2db4c7fcb90 into aef872fd2c9c4651a21d514fe15d5349(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T07:32:35,786 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:35,786 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., storeName=30767e2e8b52b777e66aa2db4c7fcb90/info, priority=13, startTime=1732519955758; duration=0sec 2024-11-25T07:32:35,786 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:35,786 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 30767e2e8b52b777e66aa2db4c7fcb90:info 2024-11-25T07:32:35,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:35,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:36,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:36,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:37,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:37,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-25T07:32:37,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/cf2de1f1e433472dba87cff5fc852aa6 is 1080, key is row0087/info:/1732519955739/Put/seqid=0 2024-11-25T07:32:37,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741857_1033 (size=16819) 2024-11-25T07:32:37,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741857_1033 (size=16819) 2024-11-25T07:32:37,771 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/cf2de1f1e433472dba87cff5fc852aa6 2024-11-25T07:32:37,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/cf2de1f1e433472dba87cff5fc852aa6 as 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/cf2de1f1e433472dba87cff5fc852aa6 2024-11-25T07:32:37,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/cf2de1f1e433472dba87cff5fc852aa6, entries=11, sequenceid=132, filesize=16.4 K 2024-11-25T07:32:37,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=17.86 KB/18292 for 30767e2e8b52b777e66aa2db4c7fcb90 in 28ms, sequenceid=132, compaction requested=false 2024-11-25T07:32:37,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:37,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-25T07:32:37,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/549b93f3ebf54a638a64219037e5ce5c is 1080, key is row0098/info:/1732519957758/Put/seqid=0 2024-11-25T07:32:37,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741858_1034 (size=24394) 2024-11-25T07:32:37,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741858_1034 (size=24394) 2024-11-25T07:32:37,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/549b93f3ebf54a638a64219037e5ce5c 2024-11-25T07:32:37,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/549b93f3ebf54a638a64219037e5ce5c as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/549b93f3ebf54a638a64219037e5ce5c 2024-11-25T07:32:37,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=30767e2e8b52b777e66aa2db4c7fcb90, server=5eb3d201e8c9,33373,1732519920437 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-25T07:32:37,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56934 deadline: 1732519967803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=30767e2e8b52b777e66aa2db4c7fcb90, server=5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:37,804 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=89 , the old value is region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=89, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=30767e2e8b52b777e66aa2db4c7fcb90, server=5eb3d201e8c9,33373,1732519920437 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T07:32:37,804 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=89 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=30767e2e8b52b777e66aa2db4c7fcb90, server=5eb3d201e8c9,33373,1732519920437 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T07:32:37,804 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., hostname=5eb3d201e8c9,33373,1732519920437, seqNum=89 because the exception is null or not the one we care about 2024-11-25T07:32:37,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/549b93f3ebf54a638a64219037e5ce5c, entries=18, sequenceid=153, filesize=23.8 K 2024-11-25T07:32:37,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for 30767e2e8b52b777e66aa2db4c7fcb90 in 21ms, sequenceid=153, compaction requested=true 2024-11-25T07:32:37,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:37,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 30767e2e8b52b777e66aa2db4c7fcb90:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T07:32:37,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:37,807 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T07:32:37,808 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73396 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T07:32:37,808 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1541): 30767e2e8b52b777e66aa2db4c7fcb90/info is initiating minor compaction (all files) 2024-11-25T07:32:37,808 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 30767e2e8b52b777e66aa2db4c7fcb90/info in TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 
2024-11-25T07:32:37,808 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/aef872fd2c9c4651a21d514fe15d5349, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/cf2de1f1e433472dba87cff5fc852aa6, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/549b93f3ebf54a638a64219037e5ce5c] into tmpdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp, totalSize=71.7 K 2024-11-25T07:32:37,808 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting aef872fd2c9c4651a21d514fe15d5349, keycount=25, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732519943583 2024-11-25T07:32:37,809 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting cf2de1f1e433472dba87cff5fc852aa6, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732519955739 2024-11-25T07:32:37,809 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 549b93f3ebf54a638a64219037e5ce5c, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732519957758 2024-11-25T07:32:37,819 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 30767e2e8b52b777e66aa2db4c7fcb90#info#compaction#73 average throughput is 55.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:32:37,819 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/4a3ea088a07b4e798652d2293ecef539 is 1080, key is row0062/info:/1732519943583/Put/seqid=0 2024-11-25T07:32:37,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741859_1035 (size=63642) 2024-11-25T07:32:37,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741859_1035 (size=63642) 2024-11-25T07:32:37,830 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/4a3ea088a07b4e798652d2293ecef539 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/4a3ea088a07b4e798652d2293ecef539 2024-11-25T07:32:37,836 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 30767e2e8b52b777e66aa2db4c7fcb90/info of 30767e2e8b52b777e66aa2db4c7fcb90 into 4a3ea088a07b4e798652d2293ecef539(size=62.2 K), total size for store is 62.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T07:32:37,836 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:37,837 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., storeName=30767e2e8b52b777e66aa2db4c7fcb90/info, priority=13, startTime=1732519957807; duration=0sec 2024-11-25T07:32:37,837 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:37,837 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 30767e2e8b52b777e66aa2db4c7fcb90:info 2024-11-25T07:32:37,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:37,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:38,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:38,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:39,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:39,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:40,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:40,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:41,680 INFO [master/5eb3d201e8c9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T07:32:41,680 INFO [master/5eb3d201e8c9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-25T07:32:41,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:41,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:42,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:42,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:43,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:43,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:44,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:44,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:45,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:45,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:46,230 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-25T07:32:46,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:46,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-25T07:32:47,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90
2024-11-25T07:32:47,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-25T07:32:47,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/115d2b6846c5487bb7abf18a7850f24c is 1080, key is row0116/info:/1732519957786/Put/seqid=0
2024-11-25T07:32:47,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741860_1036 (size=17906)
2024-11-25T07:32:47,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741860_1036 (size=17906)
2024-11-25T07:32:47,912 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/115d2b6846c5487bb7abf18a7850f24c
2024-11-25T07:32:47,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/115d2b6846c5487bb7abf18a7850f24c as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/115d2b6846c5487bb7abf18a7850f24c
2024-11-25T07:32:47,921 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/115d2b6846c5487bb7abf18a7850f24c, entries=12, sequenceid=169, filesize=17.5 K
2024-11-25T07:32:47,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:47,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:47,922 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 30767e2e8b52b777e66aa2db4c7fcb90 in 24ms, sequenceid=169, compaction requested=false 2024-11-25T07:32:47,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:48,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:48,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-25T07:32:49,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90
2024-11-25T07:32:49,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-25T07:32:49,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/5c2cbf7666af4992bd37d256c102a0ac is 1080, key is row0128/info:/1732519967900/Put/seqid=0
2024-11-25T07:32:49,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741861_1037 (size=12516)
2024-11-25T07:32:49,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741861_1037 (size=12516)
2024-11-25T07:32:49,920 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/5c2cbf7666af4992bd37d256c102a0ac
2024-11-25T07:32:49,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:49,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-25T07:32:49,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/5c2cbf7666af4992bd37d256c102a0ac as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/5c2cbf7666af4992bd37d256c102a0ac
2024-11-25T07:32:49,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/5c2cbf7666af4992bd37d256c102a0ac, entries=7, sequenceid=179, filesize=12.2 K
2024-11-25T07:32:49,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 30767e2e8b52b777e66aa2db4c7fcb90 in 22ms, sequenceid=179, compaction requested=true
2024-11-25T07:32:49,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90:
2024-11-25T07:32:49,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 30767e2e8b52b777e66aa2db4c7fcb90:info, priority=-2147483648, current under compaction store size is 1
2024-11-25T07:32:49,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T07:32:49,932 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-25T07:32:49,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90
2024-11-25T07:32:49,933 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-25T07:32:49,934 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94064 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-25T07:32:49,934 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1541): 30767e2e8b52b777e66aa2db4c7fcb90/info is initiating minor compaction (all files)
2024-11-25T07:32:49,934 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 30767e2e8b52b777e66aa2db4c7fcb90/info in TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.
2024-11-25T07:32:49,934 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/4a3ea088a07b4e798652d2293ecef539, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/115d2b6846c5487bb7abf18a7850f24c, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/5c2cbf7666af4992bd37d256c102a0ac] into tmpdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp, totalSize=91.9 K
2024-11-25T07:32:49,934 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4a3ea088a07b4e798652d2293ecef539, keycount=54, bloomtype=ROW, size=62.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732519943583
2024-11-25T07:32:49,935 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 115d2b6846c5487bb7abf18a7850f24c, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732519957786
2024-11-25T07:32:49,935 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5c2cbf7666af4992bd37d256c102a0ac, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732519967900
2024-11-25T07:32:49,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/ed5b084b356241abbb5d92d49cc1f9f7 is 1080, key is row0135/info:/1732519969911/Put/seqid=0
2024-11-25T07:32:49,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741862_1038 (size=21156)
2024-11-25T07:32:49,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741862_1038 (size=21156)
2024-11-25T07:32:49,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/ed5b084b356241abbb5d92d49cc1f9f7
2024-11-25T07:32:49,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/ed5b084b356241abbb5d92d49cc1f9f7 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/ed5b084b356241abbb5d92d49cc1f9f7
2024-11-25T07:32:49,954 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 30767e2e8b52b777e66aa2db4c7fcb90#info#compaction#77 average throughput is 74.91 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-25T07:32:49,955 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/e43db1c136904c2eae02890499f63930 is 1080, key is row0062/info:/1732519943583/Put/seqid=0
2024-11-25T07:32:49,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/ed5b084b356241abbb5d92d49cc1f9f7, entries=15, sequenceid=197, filesize=20.7 K
2024-11-25T07:32:49,956 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 30767e2e8b52b777e66aa2db4c7fcb90 in 23ms, sequenceid=197, compaction requested=false
2024-11-25T07:32:49,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90:
2024-11-25T07:32:49,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741863_1039 (size=84299)
2024-11-25T07:32:49,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741863_1039 (size=84299)
2024-11-25T07:32:49,964 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/e43db1c136904c2eae02890499f63930 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/e43db1c136904c2eae02890499f63930
2024-11-25T07:32:49,968 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 30767e2e8b52b777e66aa2db4c7fcb90/info of 30767e2e8b52b777e66aa2db4c7fcb90 into e43db1c136904c2eae02890499f63930(size=82.3 K), total size for store is 103.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-25T07:32:49,968 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:49,968 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., storeName=30767e2e8b52b777e66aa2db4c7fcb90/info, priority=13, startTime=1732519969932; duration=0sec 2024-11-25T07:32:49,968 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:49,968 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 30767e2e8b52b777e66aa2db4c7fcb90:info 2024-11-25T07:32:50,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:50,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:51,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:51,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:32:51,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:51,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-25T07:32:51,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/37dd4202d2d24c69a9222c8eace474b2 is 1080, key is row0150/info:/1732519969934/Put/seqid=0 2024-11-25T07:32:51,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741864_1040 (size=17906) 2024-11-25T07:32:51,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741864_1040 (size=17906) 2024-11-25T07:32:51,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/37dd4202d2d24c69a9222c8eace474b2 2024-11-25T07:32:51,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/37dd4202d2d24c69a9222c8eace474b2 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dd4202d2d24c69a9222c8eace474b2 2024-11-25T07:32:51,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dd4202d2d24c69a9222c8eace474b2, entries=12, sequenceid=213, filesize=17.5 K 2024-11-25T07:32:51,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=16.81 KB/17216 for 30767e2e8b52b777e66aa2db4c7fcb90 in 24ms, sequenceid=213, compaction requested=true 2024-11-25T07:32:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 30767e2e8b52b777e66aa2db4c7fcb90:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T07:32:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:51,976 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T07:32:51,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:51,977 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-25T07:32:51,977 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123361 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T07:32:51,977 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1541): 30767e2e8b52b777e66aa2db4c7fcb90/info is initiating minor compaction (all files) 2024-11-25T07:32:51,978 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 30767e2e8b52b777e66aa2db4c7fcb90/info in TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:51,978 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/e43db1c136904c2eae02890499f63930, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/ed5b084b356241abbb5d92d49cc1f9f7, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dd4202d2d24c69a9222c8eace474b2] into tmpdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp, totalSize=120.5 K 2024-11-25T07:32:51,978 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting e43db1c136904c2eae02890499f63930, keycount=73, bloomtype=ROW, size=82.3 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732519943583 2024-11-25T07:32:51,979 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed5b084b356241abbb5d92d49cc1f9f7, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732519969911 2024-11-25T07:32:51,979 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 37dd4202d2d24c69a9222c8eace474b2, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732519969934 2024-11-25T07:32:51,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/7df6ccc219b54a26864423246e2578ff is 1080, key is row0162/info:/1732519971953/Put/seqid=0 2024-11-25T07:32:51,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741865_1041 (size=23316) 2024-11-25T07:32:51,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741865_1041 (size=23316) 2024-11-25T07:32:51,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=233 (bloomFilter=true), 
to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/7df6ccc219b54a26864423246e2578ff 2024-11-25T07:32:51,991 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 30767e2e8b52b777e66aa2db4c7fcb90#info#compaction#80 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:32:51,992 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/0d8bc276c9ca4a549118915008296b17 is 1080, key is row0062/info:/1732519943583/Put/seqid=0 2024-11-25T07:32:51,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/7df6ccc219b54a26864423246e2578ff as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/7df6ccc219b54a26864423246e2578ff 2024-11-25T07:32:51,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741866_1042 (size=113515) 2024-11-25T07:32:51,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741866_1042 (size=113515) 2024-11-25T07:32:51,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/7df6ccc219b54a26864423246e2578ff, entries=17, sequenceid=233, filesize=22.8 K 2024-11-25T07:32:52,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=12.61 KB/12912 for 30767e2e8b52b777e66aa2db4c7fcb90 in 23ms, sequenceid=233, compaction requested=false 2024-11-25T07:32:52,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:52,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:52,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-25T07:32:52,002 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/0d8bc276c9ca4a549118915008296b17 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/0d8bc276c9ca4a549118915008296b17 2024-11-25T07:32:52,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/08be2041fefd4bdbb574738faf921a3e is 1080, key is row0179/info:/1732519971978/Put/seqid=0 2024-11-25T07:32:52,009 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 30767e2e8b52b777e66aa2db4c7fcb90/info of 30767e2e8b52b777e66aa2db4c7fcb90 into 0d8bc276c9ca4a549118915008296b17(size=110.9 K), total size for store is 133.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T07:32:52,009 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:52,009 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., storeName=30767e2e8b52b777e66aa2db4c7fcb90/info, priority=13, startTime=1732519971976; duration=0sec 2024-11-25T07:32:52,009 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:52,009 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 30767e2e8b52b777e66aa2db4c7fcb90:info 2024-11-25T07:32:52,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741867_1043 (size=20078) 2024-11-25T07:32:52,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741867_1043 (size=20078) 2024-11-25T07:32:52,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/08be2041fefd4bdbb574738faf921a3e 2024-11-25T07:32:52,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/08be2041fefd4bdbb574738faf921a3e as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/08be2041fefd4bdbb574738faf921a3e 2024-11-25T07:32:52,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/08be2041fefd4bdbb574738faf921a3e, entries=14, sequenceid=250, filesize=19.6 K 2024-11-25T07:32:52,020 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=0 B/0 for 30767e2e8b52b777e66aa2db4c7fcb90 in 18ms, sequenceid=250, compaction requested=true 2024-11-25T07:32:52,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:52,020 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 30767e2e8b52b777e66aa2db4c7fcb90:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T07:32:52,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:52,020 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T07:32:52,022 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 156909 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T07:32:52,022 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1541): 30767e2e8b52b777e66aa2db4c7fcb90/info is initiating minor compaction (all files) 2024-11-25T07:32:52,022 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 30767e2e8b52b777e66aa2db4c7fcb90/info in TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:52,022 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/0d8bc276c9ca4a549118915008296b17, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/7df6ccc219b54a26864423246e2578ff, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/08be2041fefd4bdbb574738faf921a3e] into tmpdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp, totalSize=153.2 K 2024-11-25T07:32:52,022 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0d8bc276c9ca4a549118915008296b17, keycount=100, bloomtype=ROW, size=110.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732519943583 2024-11-25T07:32:52,022 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7df6ccc219b54a26864423246e2578ff, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732519971953 2024-11-25T07:32:52,023 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 08be2041fefd4bdbb574738faf921a3e, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732519971978 2024-11-25T07:32:52,033 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 30767e2e8b52b777e66aa2db4c7fcb90#info#compaction#82 average throughput is 67.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:32:52,034 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/26dd07a13f8e43c2ba9d575080a84b99 is 1080, key is row0062/info:/1732519943583/Put/seqid=0 2024-11-25T07:32:52,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741868_1044 (size=147240) 2024-11-25T07:32:52,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741868_1044 (size=147240) 2024-11-25T07:32:52,042 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/26dd07a13f8e43c2ba9d575080a84b99 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/26dd07a13f8e43c2ba9d575080a84b99 2024-11-25T07:32:52,048 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 30767e2e8b52b777e66aa2db4c7fcb90/info of 30767e2e8b52b777e66aa2db4c7fcb90 into 26dd07a13f8e43c2ba9d575080a84b99(size=143.8 K), total size for store is 143.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T07:32:52,048 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:52,048 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., storeName=30767e2e8b52b777e66aa2db4c7fcb90/info, priority=13, startTime=1732519972020; duration=0sec 2024-11-25T07:32:52,048 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:52,048 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 30767e2e8b52b777e66aa2db4c7fcb90:info 2024-11-25T07:32:52,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:52,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:53,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:53,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:54,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:54,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T07:32:54,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/0726a2b66cf74039ad8ed70e1455ab80 is 1080, key is row0193/info:/1732519974004/Put/seqid=0 2024-11-25T07:32:54,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741869_1045 (size=12522) 2024-11-25T07:32:54,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741869_1045 (size=12522) 2024-11-25T07:32:54,024 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/0726a2b66cf74039ad8ed70e1455ab80 2024-11-25T07:32:54,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/0726a2b66cf74039ad8ed70e1455ab80 as 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/0726a2b66cf74039ad8ed70e1455ab80 2024-11-25T07:32:54,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/0726a2b66cf74039ad8ed70e1455ab80, entries=7, sequenceid=262, filesize=12.2 K 2024-11-25T07:32:54,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 30767e2e8b52b777e66aa2db4c7fcb90 in 22ms, sequenceid=262, compaction requested=false 2024-11-25T07:32:54,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:54,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:54,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-25T07:32:54,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/e47d295025d447dd8edcd8bcb8d6ec5c is 1080, key is row0200/info:/1732519974014/Put/seqid=0 2024-11-25T07:32:54,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741870_1046 (size=21171) 2024-11-25T07:32:54,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741870_1046 (size=21171) 2024-11-25T07:32:54,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/e47d295025d447dd8edcd8bcb8d6ec5c 2024-11-25T07:32:54,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/e47d295025d447dd8edcd8bcb8d6ec5c as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/e47d295025d447dd8edcd8bcb8d6ec5c 2024-11-25T07:32:54,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/e47d295025d447dd8edcd8bcb8d6ec5c, entries=15, sequenceid=280, filesize=20.7 K 2024-11-25T07:32:54,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for 30767e2e8b52b777e66aa2db4c7fcb90 in 19ms, sequenceid=280, compaction requested=true 2024-11-25T07:32:54,057 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:54,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 30767e2e8b52b777e66aa2db4c7fcb90:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T07:32:54,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:54,057 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T07:32:54,058 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 180933 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T07:32:54,058 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1541): 30767e2e8b52b777e66aa2db4c7fcb90/info is initiating minor compaction (all files) 2024-11-25T07:32:54,058 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 30767e2e8b52b777e66aa2db4c7fcb90/info in TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:54,058 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/26dd07a13f8e43c2ba9d575080a84b99, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/0726a2b66cf74039ad8ed70e1455ab80, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/e47d295025d447dd8edcd8bcb8d6ec5c] into tmpdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp, totalSize=176.7 K 2024-11-25T07:32:54,058 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 26dd07a13f8e43c2ba9d575080a84b99, keycount=131, bloomtype=ROW, size=143.8 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732519943583 2024-11-25T07:32:54,058 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0726a2b66cf74039ad8ed70e1455ab80, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732519974004 2024-11-25T07:32:54,059 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting e47d295025d447dd8edcd8bcb8d6ec5c, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732519974014 2024-11-25T07:32:54,068 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 30767e2e8b52b777e66aa2db4c7fcb90#info#compaction#85 average throughput is 78.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:32:54,069 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/d01632a73d5644698aee3672879d133b is 1080, key is row0062/info:/1732519943583/Put/seqid=0 2024-11-25T07:32:54,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741871_1047 (size=171087) 2024-11-25T07:32:54,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741871_1047 (size=171087) 2024-11-25T07:32:54,077 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/d01632a73d5644698aee3672879d133b as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/d01632a73d5644698aee3672879d133b 2024-11-25T07:32:54,082 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 30767e2e8b52b777e66aa2db4c7fcb90/info of 30767e2e8b52b777e66aa2db4c7fcb90 into d01632a73d5644698aee3672879d133b(size=167.1 K), total size for store is 167.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T07:32:54,082 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:54,082 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., storeName=30767e2e8b52b777e66aa2db4c7fcb90/info, priority=13, startTime=1732519974057; duration=0sec 2024-11-25T07:32:54,082 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:54,083 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 30767e2e8b52b777e66aa2db4c7fcb90:info 2024-11-25T07:32:54,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:54,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:55,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:55,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:56,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:56,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-25T07:32:56,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/32d26361f89c44459b8546e417b14a17 is 1080, key is row0215/info:/1732519974038/Put/seqid=0 2024-11-25T07:32:56,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741872_1048 (size=16839) 2024-11-25T07:32:56,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741872_1048 (size=16839) 2024-11-25T07:32:56,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/32d26361f89c44459b8546e417b14a17 2024-11-25T07:32:56,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/32d26361f89c44459b8546e417b14a17 as 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/32d26361f89c44459b8546e417b14a17 2024-11-25T07:32:56,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/32d26361f89c44459b8546e417b14a17, entries=11, sequenceid=295, filesize=16.4 K 2024-11-25T07:32:56,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for 30767e2e8b52b777e66aa2db4c7fcb90 in 21ms, sequenceid=295, compaction requested=false 2024-11-25T07:32:56,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:56,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:56,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-25T07:32:56,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/37dedd7f990943d38d55db4281a457eb is 1080, key is row0226/info:/1732519976055/Put/seqid=0 2024-11-25T07:32:56,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741873_1049 (size=20092) 2024-11-25T07:32:56,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741873_1049 (size=20092) 2024-11-25T07:32:56,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/37dedd7f990943d38d55db4281a457eb 2024-11-25T07:32:56,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/37dedd7f990943d38d55db4281a457eb as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dedd7f990943d38d55db4281a457eb 2024-11-25T07:32:56,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dedd7f990943d38d55db4281a457eb, entries=14, sequenceid=312, filesize=19.6 K 2024-11-25T07:32:56,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 30767e2e8b52b777e66aa2db4c7fcb90 in 21ms, sequenceid=312, compaction requested=true 2024-11-25T07:32:56,097 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:56,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 30767e2e8b52b777e66aa2db4c7fcb90:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T07:32:56,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:56,097 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T07:32:56,098 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 208018 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T07:32:56,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33373 {}] regionserver.HRegion(8855): Flush requested on 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:56,098 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1541): 30767e2e8b52b777e66aa2db4c7fcb90/info is initiating minor compaction (all files) 2024-11-25T07:32:56,098 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 30767e2e8b52b777e66aa2db4c7fcb90/info in TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:56,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-25T07:32:56,098 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/d01632a73d5644698aee3672879d133b, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/32d26361f89c44459b8546e417b14a17, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dedd7f990943d38d55db4281a457eb] into tmpdir=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp, totalSize=203.1 K 2024-11-25T07:32:56,099 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting d01632a73d5644698aee3672879d133b, keycount=153, bloomtype=ROW, size=167.1 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732519943583 2024-11-25T07:32:56,099 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 32d26361f89c44459b8546e417b14a17, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732519974038 2024-11-25T07:32:56,100 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] compactions.Compactor(225): Compacting 37dedd7f990943d38d55db4281a457eb, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732519976055 2024-11-25T07:32:56,102 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/90260b49a57642219fc12ddbb8d830bd is 1080, key is row0240/info:/1732519976077/Put/seqid=0 2024-11-25T07:32:56,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741874_1050 (size=21171) 2024-11-25T07:32:56,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741874_1050 (size=21171) 2024-11-25T07:32:56,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/90260b49a57642219fc12ddbb8d830bd 2024-11-25T07:32:56,112 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 30767e2e8b52b777e66aa2db4c7fcb90#info#compaction#89 average throughput is 60.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T07:32:56,113 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/133a517525cf4fa889debfd1578da44f is 1080, key is row0062/info:/1732519943583/Put/seqid=0 2024-11-25T07:32:56,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/90260b49a57642219fc12ddbb8d830bd as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/90260b49a57642219fc12ddbb8d830bd 2024-11-25T07:32:56,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741875_1051 (size=198168) 2024-11-25T07:32:56,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741875_1051 (size=198168) 2024-11-25T07:32:56,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/90260b49a57642219fc12ddbb8d830bd, entries=15, sequenceid=330, filesize=20.7 K 2024-11-25T07:32:56,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=2.10 KB/2152 for 30767e2e8b52b777e66aa2db4c7fcb90 in 22ms, sequenceid=330, compaction requested=false 2024-11-25T07:32:56,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:56,121 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/133a517525cf4fa889debfd1578da44f as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/133a517525cf4fa889debfd1578da44f 2024-11-25T07:32:56,126 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 30767e2e8b52b777e66aa2db4c7fcb90/info of 30767e2e8b52b777e66aa2db4c7fcb90 into 133a517525cf4fa889debfd1578da44f(size=193.5 K), total size for store is 214.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T07:32:56,126 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:56,127 INFO [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., storeName=30767e2e8b52b777e66aa2db4c7fcb90/info, priority=13, startTime=1732519976097; duration=0sec 2024-11-25T07:32:56,127 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T07:32:56,127 DEBUG [RS:0;5eb3d201e8c9:33373-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 30767e2e8b52b777e66aa2db4c7fcb90:info 2024-11-25T07:32:56,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:56,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:57,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:57,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:58,102 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-25T07:32:58,102 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C33373%2C1732519920437.1732519978102 2024-11-25T07:32:58,108 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,108 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,108 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,108 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,108 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,108 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437/5eb3d201e8c9%2C33373%2C1732519920437.1732519920827 with entries=313, filesize=308.60 KB; new WAL /user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437/5eb3d201e8c9%2C33373%2C1732519920437.1732519978102 2024-11-25T07:32:58,109 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39055:39055),(127.0.0.1/127.0.0.1:36405:36405)] 2024-11-25T07:32:58,109 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437/5eb3d201e8c9%2C33373%2C1732519920437.1732519920827 is not closed yet, will try archiving it next time 2024-11-25T07:32:58,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741833_1009 (size=316019) 2024-11-25T07:32:58,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741833_1009 (size=316019) 2024-11-25T07:32:58,113 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 30767e2e8b52b777e66aa2db4c7fcb90 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-25T07:32:58,116 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/7c2a91fdcb9448dbb23d3a944f0439c1 is 1080, key is row0255/info:/1732519976099/Put/seqid=0 2024-11-25T07:32:58,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741877_1053 (size=7116) 2024-11-25T07:32:58,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741877_1053 (size=7116) 2024-11-25T07:32:58,122 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=336 (bloomFilter=true), 
to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/7c2a91fdcb9448dbb23d3a944f0439c1 2024-11-25T07:32:58,127 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/.tmp/info/7c2a91fdcb9448dbb23d3a944f0439c1 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/7c2a91fdcb9448dbb23d3a944f0439c1 2024-11-25T07:32:58,132 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/7c2a91fdcb9448dbb23d3a944f0439c1, entries=2, sequenceid=336, filesize=6.9 K 2024-11-25T07:32:58,133 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 30767e2e8b52b777e66aa2db4c7fcb90 in 20ms, sequenceid=336, compaction requested=true 2024-11-25T07:32:58,133 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 30767e2e8b52b777e66aa2db4c7fcb90: 2024-11-25T07:32:58,133 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-25T07:32:58,137 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/info/76030f9898fc443095f5d7fe8c4e1b34 is 186, key is TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73./info:regioninfo/1732519944300/Put/seqid=0 2024-11-25T07:32:58,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741878_1054 (size=6153) 2024-11-25T07:32:58,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741878_1054 (size=6153) 2024-11-25T07:32:58,142 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/info/76030f9898fc443095f5d7fe8c4e1b34 2024-11-25T07:32:58,147 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/.tmp/info/76030f9898fc443095f5d7fe8c4e1b34 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/info/76030f9898fc443095f5d7fe8c4e1b34 2024-11-25T07:32:58,151 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/info/76030f9898fc443095f5d7fe8c4e1b34, entries=5, sequenceid=21, filesize=6.0 K 2024-11-25T07:32:58,152 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 19ms, 
sequenceid=21, compaction requested=false 2024-11-25T07:32:58,152 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-25T07:32:58,152 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 5519f0cd9d3b7b90490198126cbbdc73: 2024-11-25T07:32:58,153 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C33373%2C1732519920437.1732519978153 2024-11-25T07:32:58,157 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,157 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,157 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,157 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,157 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,157 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437/5eb3d201e8c9%2C33373%2C1732519920437.1732519978102 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437/5eb3d201e8c9%2C33373%2C1732519920437.1732519978153 2024-11-25T07:32:58,158 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36405:36405),(127.0.0.1/127.0.0.1:39055:39055)] 2024-11-25T07:32:58,158 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437/5eb3d201e8c9%2C33373%2C1732519920437.1732519978102 is not closed yet, will try archiving it next time 2024-11-25T07:32:58,158 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437/5eb3d201e8c9%2C33373%2C1732519920437.1732519920827 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/oldWALs/5eb3d201e8c9%2C33373%2C1732519920437.1732519920827 2024-11-25T07:32:58,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741876_1052 (size=731) 2024-11-25T07:32:58,159 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T07:32:58,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741876_1052 (size=731) 2024-11-25T07:32:58,160 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/WALs/5eb3d201e8c9,33373,1732519920437/5eb3d201e8c9%2C33373%2C1732519920437.1732519978102 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/oldWALs/5eb3d201e8c9%2C33373%2C1732519920437.1732519978102 2024-11-25T07:32:58,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T07:32:58,259 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
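The recurring Close-WAL-Writer-0 warnings earlier in this excerpt (util.RecoverLeaseFSUtils(258): Failed invocation for ...) all have the same shape: a reflective call to DistributedFileSystem.isFileClosed() throws InvocationTargetException wrapping IOException("Filesystem closed"), apparently because the mini cluster's DFSClient has already been shut down by the time the WAL writer is closed. A minimal sketch of that reflective probe pattern, assuming only the public Hadoop FileSystem/Path API; it is illustrative, not the actual RecoverLeaseFSUtils implementation:

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  /** Best-effort probe; returns false whenever the call cannot be completed. */
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      // DistributedFileSystem.isFileClosed(Path) is resolved reflectively so this
      // also compiles against FileSystem implementations that do not expose it.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // method not available on this FileSystem
    } catch (InvocationTargetException e) {
      // The interesting failure is the wrapped cause, e.g. "Filesystem closed"
      // once the underlying DFSClient has been shut down.
      System.err.println("Failed invocation for " + path + ": " + e.getCause());
      return false;
    }
  }
}
```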
2024-11-25T07:32:58,259 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-25T07:32:58,260 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-25T07:32:58,260 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-25T07:32:58,260 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
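The call stack just logged shows where this shutdown begins: the JUnit @After hook (AbstractTestLogRolling.tearDown) calls HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection and then stops the master and region server in the entries that follow. A minimal sketch of that teardown arrangement, with placeholder class and field names since the real test source is not shown here:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterTeardownSketch {

  // Shared test harness; the real tests keep one of these per test class.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster(); // mini DFS + ZooKeeper + one master + one region server
  }

  @After
  public void tearDown() throws Exception {
    // Closes the cluster connection and stops master/region servers, producing the
    // "Shutting down minicluster" and "Connection has been closed" entries above.
    testUtil.shutdownMiniCluster();
  }
}
```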
2024-11-25T07:32:58,260 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T07:32:58,260 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1622994876, stopped=false 2024-11-25T07:32:58,260 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5eb3d201e8c9,41919,1732519920390 2024-11-25T07:32:58,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:32:58,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:32:58,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:58,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:58,262 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:32:58,262 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T07:32:58,262 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:32:58,262 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:32:58,262 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5eb3d201e8c9,33373,1732519920437' ***** 2024-11-25T07:32:58,262 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T07:32:58,263 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:32:58,263 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:32:58,263 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T07:32:58,263 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T07:32:58,263 INFO [RS:0;5eb3d201e8c9:33373 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T07:32:58,263 INFO [RS:0;5eb3d201e8c9:33373 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
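The ZooKeeper traffic above (NodeDeleted on /hbase/running, then each watcher re-armed on a znode that no longer exists) is the shutdown signal: deleting the running znode tells every cluster member to stop. A hedged sketch of that watch-the-running-znode pattern using the plain ZooKeeper client; the quorum string and path are copied from the log entries, everything else is illustrative rather than HBase's ZKWatcher/ZKUtil code:

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {

  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:55992";       // quorum seen in the log entries above
    String runningZNode = "/hbase/running";  // deleted to request cluster shutdown

    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && runningZNode.equals(event.getPath())) {
        System.out.println("Cluster shutdown requested: " + event.getPath() + " deleted");
      }
    };

    ZooKeeper zk = new ZooKeeper(quorum, 30_000, watcher);
    // exists() works whether or not the znode is currently present and (re)arms the watch,
    // mirroring the "Set watcher on znode that does not yet exist" entries above.
    zk.exists(runningZNode, watcher);
    Thread.sleep(60_000); // keep the process alive long enough to observe the event
    zk.close();
  }
}
```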
2024-11-25T07:32:58,263 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(3091): Received CLOSE for 30767e2e8b52b777e66aa2db4c7fcb90 2024-11-25T07:32:58,263 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(3091): Received CLOSE for 5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:58,264 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(959): stopping server 5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:58,264 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:32:58,264 INFO [RS:0;5eb3d201e8c9:33373 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5eb3d201e8c9:33373. 2024-11-25T07:32:58,264 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 30767e2e8b52b777e66aa2db4c7fcb90, disabling compactions & flushes 2024-11-25T07:32:58,264 DEBUG [RS:0;5eb3d201e8c9:33373 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:32:58,264 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:58,264 DEBUG [RS:0;5eb3d201e8c9:33373 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:32:58,264 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:58,264 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. after waiting 0 ms 2024-11-25T07:32:58,264 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-25T07:32:58,264 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:58,264 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T07:32:58,264 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T07:32:58,264 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T07:32:58,264 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-25T07:32:58,264 DEBUG [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(1325): Online Regions={30767e2e8b52b777e66aa2db4c7fcb90=TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90., 1588230740=hbase:meta,,1.1588230740, 5519f0cd9d3b7b90490198126cbbdc73=TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73.} 2024-11-25T07:32:58,264 DEBUG [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 30767e2e8b52b777e66aa2db4c7fcb90, 5519f0cd9d3b7b90490198126cbbdc73 2024-11-25T07:32:58,264 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:32:58,264 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:32:58,264 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:32:58,264 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:32:58,265 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:32:58,264 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb->hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/89d372ae1d234ef8b4d3ffeb2cf8a443-top, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/ea39497feca54492b0b752b6c2b60d5c, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/TestLogRolling-testLogRolling=991144de426cafcdf6a39be16db812fb-abc0825106b344a1a7be8495fa3aeee6, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/f5ab5672a4f14519b0631135a283a640, 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/aef872fd2c9c4651a21d514fe15d5349, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/6641d065c32d4209959262ef79e0fd2c, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/cf2de1f1e433472dba87cff5fc852aa6, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/4a3ea088a07b4e798652d2293ecef539, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/549b93f3ebf54a638a64219037e5ce5c, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/115d2b6846c5487bb7abf18a7850f24c, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/e43db1c136904c2eae02890499f63930, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/5c2cbf7666af4992bd37d256c102a0ac, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/ed5b084b356241abbb5d92d49cc1f9f7, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/0d8bc276c9ca4a549118915008296b17, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dd4202d2d24c69a9222c8eace474b2, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/7df6ccc219b54a26864423246e2578ff, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/26dd07a13f8e43c2ba9d575080a84b99, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/08be2041fefd4bdbb574738faf921a3e, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/0726a2b66cf74039ad8ed70e1455ab80, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/d01632a73d5644698aee3672879d133b, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/e47d295025d447dd8edcd8bcb8d6ec5c, 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/32d26361f89c44459b8546e417b14a17, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dedd7f990943d38d55db4281a457eb] to archive 2024-11-25T07:32:58,266 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T07:32:58,267 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:58,269 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/ea39497feca54492b0b752b6c2b60d5c to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/ea39497feca54492b0b752b6c2b60d5c 2024-11-25T07:32:58,269 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-25T07:32:58,269 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:32:58,270 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:32:58,270 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519978264Running coprocessor pre-close hooks at 1732519978264Disabling compacts and flushes for region at 1732519978264Disabling writes for close at 1732519978265 (+1 ms)Writing region close event to WAL at 1732519978266 (+1 ms)Running coprocessor post-close hooks at 1732519978269 (+3 ms)Closed at 1732519978270 (+1 ms) 2024-11-25T07:32:58,270 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T07:32:58,270 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/TestLogRolling-testLogRolling=991144de426cafcdf6a39be16db812fb-abc0825106b344a1a7be8495fa3aeee6 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/TestLogRolling-testLogRolling=991144de426cafcdf6a39be16db812fb-abc0825106b344a1a7be8495fa3aeee6 2024-11-25T07:32:58,271 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/f5ab5672a4f14519b0631135a283a640 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/f5ab5672a4f14519b0631135a283a640 2024-11-25T07:32:58,272 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/aef872fd2c9c4651a21d514fe15d5349 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/aef872fd2c9c4651a21d514fe15d5349 2024-11-25T07:32:58,273 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/6641d065c32d4209959262ef79e0fd2c to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/6641d065c32d4209959262ef79e0fd2c 2024-11-25T07:32:58,274 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/cf2de1f1e433472dba87cff5fc852aa6 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/cf2de1f1e433472dba87cff5fc852aa6 2024-11-25T07:32:58,275 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/4a3ea088a07b4e798652d2293ecef539 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/4a3ea088a07b4e798652d2293ecef539 2024-11-25T07:32:58,276 
DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/549b93f3ebf54a638a64219037e5ce5c to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/549b93f3ebf54a638a64219037e5ce5c 2024-11-25T07:32:58,277 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/115d2b6846c5487bb7abf18a7850f24c to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/115d2b6846c5487bb7abf18a7850f24c 2024-11-25T07:32:58,278 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/e43db1c136904c2eae02890499f63930 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/e43db1c136904c2eae02890499f63930 2024-11-25T07:32:58,279 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/5c2cbf7666af4992bd37d256c102a0ac to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/5c2cbf7666af4992bd37d256c102a0ac 2024-11-25T07:32:58,280 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/ed5b084b356241abbb5d92d49cc1f9f7 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/ed5b084b356241abbb5d92d49cc1f9f7 2024-11-25T07:32:58,281 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/0d8bc276c9ca4a549118915008296b17 to 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/0d8bc276c9ca4a549118915008296b17 2024-11-25T07:32:58,282 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dd4202d2d24c69a9222c8eace474b2 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dd4202d2d24c69a9222c8eace474b2 2024-11-25T07:32:58,283 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/7df6ccc219b54a26864423246e2578ff to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/7df6ccc219b54a26864423246e2578ff 2024-11-25T07:32:58,284 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/26dd07a13f8e43c2ba9d575080a84b99 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/26dd07a13f8e43c2ba9d575080a84b99 2024-11-25T07:32:58,285 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/08be2041fefd4bdbb574738faf921a3e to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/08be2041fefd4bdbb574738faf921a3e 2024-11-25T07:32:58,286 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/0726a2b66cf74039ad8ed70e1455ab80 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/0726a2b66cf74039ad8ed70e1455ab80 2024-11-25T07:32:58,287 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/d01632a73d5644698aee3672879d133b to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/d01632a73d5644698aee3672879d133b 2024-11-25T07:32:58,288 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/e47d295025d447dd8edcd8bcb8d6ec5c to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/e47d295025d447dd8edcd8bcb8d6ec5c 2024-11-25T07:32:58,289 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/32d26361f89c44459b8546e417b14a17 to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/32d26361f89c44459b8546e417b14a17 2024-11-25T07:32:58,290 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dedd7f990943d38d55db4281a457eb to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/info/37dedd7f990943d38d55db4281a457eb 2024-11-25T07:32:58,290 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5eb3d201e8c9:41919 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-25T07:32:58,290 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [ea39497feca54492b0b752b6c2b60d5c=8359, f5ab5672a4f14519b0631135a283a640=12509, aef872fd2c9c4651a21d514fe15d5349=32183, 6641d065c32d4209959262ef79e0fd2c=21141, cf2de1f1e433472dba87cff5fc852aa6=16819, 4a3ea088a07b4e798652d2293ecef539=63642, 549b93f3ebf54a638a64219037e5ce5c=24394, 115d2b6846c5487bb7abf18a7850f24c=17906, e43db1c136904c2eae02890499f63930=84299, 5c2cbf7666af4992bd37d256c102a0ac=12516, ed5b084b356241abbb5d92d49cc1f9f7=21156, 0d8bc276c9ca4a549118915008296b17=113515, 37dd4202d2d24c69a9222c8eace474b2=17906, 7df6ccc219b54a26864423246e2578ff=23316, 26dd07a13f8e43c2ba9d575080a84b99=147240, 08be2041fefd4bdbb574738faf921a3e=20078, 0726a2b66cf74039ad8ed70e1455ab80=12522, d01632a73d5644698aee3672879d133b=171087, e47d295025d447dd8edcd8bcb8d6ec5c=21171, 32d26361f89c44459b8546e417b14a17=16839, 37dedd7f990943d38d55db4281a457eb=20092] 2024-11-25T07:32:58,294 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/30767e2e8b52b777e66aa2db4c7fcb90/recovered.edits/339.seqid, newMaxSeqId=339, maxSeqId=88 2024-11-25T07:32:58,294 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:58,294 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 30767e2e8b52b777e66aa2db4c7fcb90: Waiting for close lock at 1732519978264Running coprocessor pre-close hooks at 1732519978264Disabling compacts and flushes for region at 1732519978264Disabling writes for close at 1732519978264Writing region close event to WAL at 1732519978291 (+27 ms)Running coprocessor post-close hooks at 1732519978294 (+3 ms)Closed at 1732519978294 2024-11-25T07:32:58,294 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732519943641.30767e2e8b52b777e66aa2db4c7fcb90. 2024-11-25T07:32:58,294 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5519f0cd9d3b7b90490198126cbbdc73, disabling compactions & flushes 2024-11-25T07:32:58,294 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73. 2024-11-25T07:32:58,294 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73. 2024-11-25T07:32:58,294 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73. 
after waiting 0 ms 2024-11-25T07:32:58,294 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73. 2024-11-25T07:32:58,295 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb->hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/991144de426cafcdf6a39be16db812fb/info/89d372ae1d234ef8b4d3ffeb2cf8a443-bottom] to archive 2024-11-25T07:32:58,295 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T07:32:58,296 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb to hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/archive/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/info/89d372ae1d234ef8b4d3ffeb2cf8a443.991144de426cafcdf6a39be16db812fb 2024-11-25T07:32:58,297 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-25T07:32:58,299 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/data/default/TestLogRolling-testLogRolling/5519f0cd9d3b7b90490198126cbbdc73/recovered.edits/93.seqid, newMaxSeqId=93, maxSeqId=88 2024-11-25T07:32:58,300 INFO [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73. 2024-11-25T07:32:58,300 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5519f0cd9d3b7b90490198126cbbdc73: Waiting for close lock at 1732519978294Running coprocessor pre-close hooks at 1732519978294Disabling compacts and flushes for region at 1732519978294Disabling writes for close at 1732519978294Writing region close event to WAL at 1732519978297 (+3 ms)Running coprocessor post-close hooks at 1732519978300 (+3 ms)Closed at 1732519978300 2024-11-25T07:32:58,300 DEBUG [RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732519943641.5519f0cd9d3b7b90490198126cbbdc73. 2024-11-25T07:32:58,464 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(976): stopping server 5eb3d201e8c9,33373,1732519920437; all regions closed. 
2024-11-25T07:32:58,465 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,465 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,465 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,465 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,465 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741834_1010 (size=8107) 2024-11-25T07:32:58,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741834_1010 (size=8107) 2024-11-25T07:32:58,469 DEBUG [RS:0;5eb3d201e8c9:33373 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/oldWALs 2024-11-25T07:32:58,469 INFO [RS:0;5eb3d201e8c9:33373 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C33373%2C1732519920437.meta:.meta(num 1732519921217) 2024-11-25T07:32:58,470 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,470 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,470 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,470 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,470 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741879_1055 (size=778) 2024-11-25T07:32:58,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741879_1055 (size=778) 2024-11-25T07:32:58,473 DEBUG [RS:0;5eb3d201e8c9:33373 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/oldWALs 2024-11-25T07:32:58,473 INFO [RS:0;5eb3d201e8c9:33373 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C33373%2C1732519920437:(num 1732519978153) 2024-11-25T07:32:58,473 DEBUG [RS:0;5eb3d201e8c9:33373 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:32:58,473 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:32:58,473 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:32:58,473 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.ChoreService(370): Chore service for: regionserver/5eb3d201e8c9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T07:32:58,474 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:32:58,474 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T07:32:58,474 INFO [RS:0;5eb3d201e8c9:33373 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33373 2024-11-25T07:32:58,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5eb3d201e8c9,33373,1732519920437 2024-11-25T07:32:58,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:32:58,476 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:32:58,478 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5eb3d201e8c9,33373,1732519920437] 2024-11-25T07:32:58,479 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5eb3d201e8c9,33373,1732519920437 already deleted, retry=false 2024-11-25T07:32:58,479 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5eb3d201e8c9,33373,1732519920437 expired; onlineServers=0 2024-11-25T07:32:58,479 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5eb3d201e8c9,41919,1732519920390' ***** 2024-11-25T07:32:58,479 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T07:32:58,480 INFO [M:0;5eb3d201e8c9:41919 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:32:58,480 INFO [M:0;5eb3d201e8c9:41919 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:32:58,480 DEBUG [M:0;5eb3d201e8c9:41919 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T07:32:58,480 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T07:32:58,480 DEBUG [M:0;5eb3d201e8c9:41919 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T07:32:58,480 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519920615 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519920615,5,FailOnTimeoutGroup] 2024-11-25T07:32:58,480 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519920614 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519920614,5,FailOnTimeoutGroup] 2024-11-25T07:32:58,480 INFO [M:0;5eb3d201e8c9:41919 {}] hbase.ChoreService(370): Chore service for: master/5eb3d201e8c9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T07:32:58,480 INFO [M:0;5eb3d201e8c9:41919 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:32:58,480 DEBUG [M:0;5eb3d201e8c9:41919 {}] master.HMaster(1795): Stopping service threads 2024-11-25T07:32:58,480 INFO [M:0;5eb3d201e8c9:41919 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T07:32:58,480 INFO [M:0;5eb3d201e8c9:41919 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:32:58,480 INFO [M:0;5eb3d201e8c9:41919 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T07:32:58,480 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T07:32:58,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T07:32:58,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:58,481 DEBUG [M:0;5eb3d201e8c9:41919 {}] zookeeper.ZKUtil(347): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T07:32:58,481 WARN [M:0;5eb3d201e8c9:41919 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T07:32:58,482 INFO [M:0;5eb3d201e8c9:41919 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/.lastflushedseqids 2024-11-25T07:32:58,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741880_1056 (size=228) 2024-11-25T07:32:58,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741880_1056 (size=228) 2024-11-25T07:32:58,488 INFO [M:0;5eb3d201e8c9:41919 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T07:32:58,488 INFO [M:0;5eb3d201e8c9:41919 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T07:32:58,488 DEBUG [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:32:58,488 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:32:58,488 DEBUG [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:32:58,488 DEBUG [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:32:58,488 DEBUG [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:32:58,488 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-25T07:32:58,504 DEBUG [M:0;5eb3d201e8c9:41919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62f453dc534d475d97fc85cfa9e52273 is 82, key is hbase:meta,,1/info:regioninfo/1732519921246/Put/seqid=0 2024-11-25T07:32:58,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741881_1057 (size=5672) 2024-11-25T07:32:58,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741881_1057 (size=5672) 2024-11-25T07:32:58,509 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62f453dc534d475d97fc85cfa9e52273 2024-11-25T07:32:58,527 DEBUG [M:0;5eb3d201e8c9:41919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/03f2ccdc43cb4dcba681c548168028ab is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732519921735/Put/seqid=0 2024-11-25T07:32:58,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741882_1058 (size=7090) 2024-11-25T07:32:58,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741882_1058 (size=7090) 2024-11-25T07:32:58,532 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/03f2ccdc43cb4dcba681c548168028ab 2024-11-25T07:32:58,535 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 03f2ccdc43cb4dcba681c548168028ab 2024-11-25T07:32:58,549 DEBUG [M:0;5eb3d201e8c9:41919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/10686f9f513241a09eb1d61407227ff1 is 69, key is 5eb3d201e8c9,33373,1732519920437/rs:state/1732519920678/Put/seqid=0 2024-11-25T07:32:58,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741883_1059 (size=5156) 2024-11-25T07:32:58,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741883_1059 (size=5156) 2024-11-25T07:32:58,554 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/10686f9f513241a09eb1d61407227ff1 2024-11-25T07:32:58,578 DEBUG [M:0;5eb3d201e8c9:41919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6af45f4c22d046d5b5fb505f5253f852 is 52, key is load_balancer_on/state:d/1732519921367/Put/seqid=0 2024-11-25T07:32:58,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:32:58,578 INFO [RS:0;5eb3d201e8c9:33373 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:32:58,578 INFO [RS:0;5eb3d201e8c9:33373 {}] regionserver.HRegionServer(1031): Exiting; stopping=5eb3d201e8c9,33373,1732519920437; zookeeper connection closed. 
2024-11-25T07:32:58,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33373-0x1014e09ed880001, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:32:58,579 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6ec20752 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6ec20752 2024-11-25T07:32:58,579 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T07:32:58,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741884_1060 (size=5056) 2024-11-25T07:32:58,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741884_1060 (size=5056) 2024-11-25T07:32:58,582 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6af45f4c22d046d5b5fb505f5253f852 2024-11-25T07:32:58,587 DEBUG [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62f453dc534d475d97fc85cfa9e52273 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/62f453dc534d475d97fc85cfa9e52273 2024-11-25T07:32:58,591 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/62f453dc534d475d97fc85cfa9e52273, entries=8, sequenceid=125, filesize=5.5 K 2024-11-25T07:32:58,592 DEBUG [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/03f2ccdc43cb4dcba681c548168028ab as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/03f2ccdc43cb4dcba681c548168028ab 2024-11-25T07:32:58,596 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 03f2ccdc43cb4dcba681c548168028ab 2024-11-25T07:32:58,596 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/03f2ccdc43cb4dcba681c548168028ab, entries=13, sequenceid=125, filesize=6.9 K 2024-11-25T07:32:58,596 DEBUG [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/10686f9f513241a09eb1d61407227ff1 as 
hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/10686f9f513241a09eb1d61407227ff1 2024-11-25T07:32:58,600 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/10686f9f513241a09eb1d61407227ff1, entries=1, sequenceid=125, filesize=5.0 K 2024-11-25T07:32:58,600 DEBUG [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6af45f4c22d046d5b5fb505f5253f852 as hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6af45f4c22d046d5b5fb505f5253f852 2024-11-25T07:32:58,604 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37267/user/jenkins/test-data/3a65863a-4c09-dcb1-f94e-3f1b82cbd26f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6af45f4c22d046d5b5fb505f5253f852, entries=1, sequenceid=125, filesize=4.9 K 2024-11-25T07:32:58,605 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=125, compaction requested=false 2024-11-25T07:32:58,606 INFO [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:32:58,606 DEBUG [M:0;5eb3d201e8c9:41919 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519978488Disabling compacts and flushes for region at 1732519978488Disabling writes for close at 1732519978488Obtaining lock to block concurrent updates at 1732519978488Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732519978488Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1732519978488Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732519978489 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732519978489Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732519978504 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732519978504Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732519978513 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732519978527 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732519978527Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732519978536 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732519978549 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732519978549Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732519978557 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732519978577 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732519978577Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3810be55: reopening flushed file at 1732519978586 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64b642a1: reopening flushed file at 1732519978591 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@147861a6: reopening flushed file at 1732519978596 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a133955: reopening flushed file at 1732519978600 (+4 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=125, compaction requested=false at 1732519978605 (+5 ms)Writing region close event to WAL at 1732519978606 (+1 ms)Closed at 1732519978606 2024-11-25T07:32:58,606 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,607 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,607 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,607 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,607 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:32:58,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35065 is added to blk_1073741830_1006 (size=61320) 2024-11-25T07:32:58,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35031 is added to blk_1073741830_1006 (size=61320) 2024-11-25T07:32:58,609 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T07:32:58,609 INFO [M:0;5eb3d201e8c9:41919 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-25T07:32:58,609 INFO [M:0;5eb3d201e8c9:41919 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41919 2024-11-25T07:32:58,609 INFO [M:0;5eb3d201e8c9:41919 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:32:58,694 INFO [regionserver/5eb3d201e8c9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:32:58,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:32:58,711 INFO [M:0;5eb3d201e8c9:41919 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:32:58,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41919-0x1014e09ed880000, quorum=127.0.0.1:55992, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:32:58,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ae6275a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:32:58,714 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a34980e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:32:58,714 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:32:58,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26a9d62d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:32:58,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25dcc129{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/hadoop.log.dir/,STOPPED} 2024-11-25T07:32:58,716 WARN [BP-1014115906-172.17.0.2-1732519919568 heartbeating to localhost/127.0.0.1:37267 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:32:58,716 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:32:58,716 WARN [BP-1014115906-172.17.0.2-1732519919568 heartbeating to localhost/127.0.0.1:37267 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1014115906-172.17.0.2-1732519919568 (Datanode Uuid cf91bf23-96cc-46eb-97b3-269ddd361ff1) service to localhost/127.0.0.1:37267 2024-11-25T07:32:58,716 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:32:58,716 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d/data/data3/current/BP-1014115906-172.17.0.2-1732519919568 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:32:58,717 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d/data/data4/current/BP-1014115906-172.17.0.2-1732519919568 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:32:58,717 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:32:58,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f932cc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:32:58,719 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ff7780b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:32:58,719 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:32:58,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2735da07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:32:58,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16ccf5f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/hadoop.log.dir/,STOPPED} 2024-11-25T07:32:58,720 WARN [BP-1014115906-172.17.0.2-1732519919568 heartbeating to localhost/127.0.0.1:37267 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:32:58,721 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:32:58,721 WARN [BP-1014115906-172.17.0.2-1732519919568 heartbeating to localhost/127.0.0.1:37267 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1014115906-172.17.0.2-1732519919568 (Datanode Uuid 4911d89d-b9a8-4853-9f1e-54d200d1830d) service to localhost/127.0.0.1:37267 2024-11-25T07:32:58,721 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:32:58,721 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d/data/data1/current/BP-1014115906-172.17.0.2-1732519919568 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:32:58,721 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/cluster_1bca10b4-48f0-b93a-883b-e5b52990cf9d/data/data2/current/BP-1014115906-172.17.0.2-1732519919568 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:32:58,721 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:32:58,727 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1520cb76{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:32:58,728 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65b546b1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:32:58,728 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:32:58,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5327e2a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:32:58,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33382c80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/hadoop.log.dir/,STOPPED} 2024-11-25T07:32:58,735 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T07:32:58,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T07:32:58,775 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 205) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:37267 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:37267 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1496589199) connection to 
localhost/127.0.0.1:37267 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37267 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37267 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:37267 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37267 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37267 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=518 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=51 (was 61), ProcessCount=11 (was 11), AvailableMemoryMB=7920 (was 7970) 2024-11-25T07:32:58,782 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=518, MaxFileDescriptor=1048576, SystemLoadAverage=51, ProcessCount=11, AvailableMemoryMB=7920 2024-11-25T07:32:58,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T07:32:58,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/hadoop.log.dir so I do NOT create it in target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e 2024-11-25T07:32:58,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d6286e1-4adc-1e74-ce6f-707462e232ae/hadoop.tmp.dir so I do NOT create it in target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e 2024-11-25T07:32:58,783 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f, deleteOnExit=true 2024-11-25T07:32:58,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T07:32:58,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/test.cache.data in system properties and HBase conf 2024-11-25T07:32:58,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T07:32:58,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/hadoop.log.dir in system properties and HBase conf 2024-11-25T07:32:58,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T07:32:58,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T07:32:58,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T07:32:58,784 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-25T07:32:58,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:32:58,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T07:32:58,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T07:32:58,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:32:58,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T07:32:58,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T07:32:58,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T07:32:58,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:32:58,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T07:32:58,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/nfs.dump.dir in system properties and HBase conf 2024-11-25T07:32:58,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/java.io.tmpdir in system properties and HBase conf 2024-11-25T07:32:58,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T07:32:58,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T07:32:58,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T07:32:58,805 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:32:58,860 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:32:58,863 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:32:58,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:32:58,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:32:58,865 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:32:58,865 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:32:58,868 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@309e84ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:32:58,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ed058d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:32:58,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:58,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:58,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@40f3733a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/java.io.tmpdir/jetty-localhost-45421-hadoop-hdfs-3_4_1-tests_jar-_-any-16884436037761247266/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:32:58,982 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c3c893{HTTP/1.1, (http/1.1)}{localhost:45421} 2024-11-25T07:32:58,982 INFO [Time-limited test {}] server.Server(415): Started @298829ms 2024-11-25T07:32:58,995 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T07:32:59,049 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:32:59,051 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:32:59,052 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:32:59,052 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:32:59,052 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:32:59,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a86d190{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:32:59,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a7b167c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:32:59,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@186f146f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/java.io.tmpdir/jetty-localhost-33271-hadoop-hdfs-3_4_1-tests_jar-_-any-2908010991580381353/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:32:59,167 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@231297c7{HTTP/1.1, (http/1.1)}{localhost:33271} 2024-11-25T07:32:59,167 INFO [Time-limited test {}] server.Server(415): Started @299014ms 2024-11-25T07:32:59,168 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T07:32:59,197 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T07:32:59,200 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T07:32:59,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T07:32:59,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T07:32:59,200 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T07:32:59,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5dba136e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/hadoop.log.dir/,AVAILABLE} 2024-11-25T07:32:59,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2629cfe0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T07:32:59,278 WARN [Thread-2481 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f/data/data1/current/BP-891294055-172.17.0.2-1732519978811/current, will proceed with Du for space computation calculation, 2024-11-25T07:32:59,278 WARN [Thread-2482 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f/data/data2/current/BP-891294055-172.17.0.2-1732519978811/current, will proceed with Du for space computation calculation, 2024-11-25T07:32:59,305 WARN [Thread-2460 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T07:32:59,307 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf9a5981351d461da with lease ID 0x4fa5ca2bc4789d06: Processing first storage report for DS-6962486d-f2d3-4cee-8441-730170f486ac from datanode DatanodeRegistration(127.0.0.1:34361, datanodeUuid=76b2160d-98a3-410c-bb97-ec3551e3c600, infoPort=36271, infoSecurePort=0, ipcPort=41385, storageInfo=lv=-57;cid=testClusterID;nsid=443486215;c=1732519978811) 2024-11-25T07:32:59,307 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9a5981351d461da with lease ID 0x4fa5ca2bc4789d06: from storage DS-6962486d-f2d3-4cee-8441-730170f486ac node DatanodeRegistration(127.0.0.1:34361, datanodeUuid=76b2160d-98a3-410c-bb97-ec3551e3c600, infoPort=36271, infoSecurePort=0, ipcPort=41385, storageInfo=lv=-57;cid=testClusterID;nsid=443486215;c=1732519978811), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:32:59,307 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf9a5981351d461da with lease ID 0x4fa5ca2bc4789d06: Processing first storage report for DS-f4638f14-0b4d-4801-bbbc-bb14cf550def from datanode DatanodeRegistration(127.0.0.1:34361, datanodeUuid=76b2160d-98a3-410c-bb97-ec3551e3c600, infoPort=36271, infoSecurePort=0, ipcPort=41385, storageInfo=lv=-57;cid=testClusterID;nsid=443486215;c=1732519978811) 2024-11-25T07:32:59,307 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9a5981351d461da with lease ID 0x4fa5ca2bc4789d06: from storage DS-f4638f14-0b4d-4801-bbbc-bb14cf550def node DatanodeRegistration(127.0.0.1:34361, datanodeUuid=76b2160d-98a3-410c-bb97-ec3551e3c600, infoPort=36271, infoSecurePort=0, ipcPort=41385, storageInfo=lv=-57;cid=testClusterID;nsid=443486215;c=1732519978811), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:32:59,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@554afc70{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/java.io.tmpdir/jetty-localhost-35169-hadoop-hdfs-3_4_1-tests_jar-_-any-18359582342218531688/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:32:59,321 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@cbcac8c{HTTP/1.1, (http/1.1)}{localhost:35169} 2024-11-25T07:32:59,321 INFO [Time-limited test {}] server.Server(415): Started @299169ms 2024-11-25T07:32:59,323 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
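The CachingGetSpaceUsed refresh threads whose interruption was logged during the earlier datanode shutdown, and the "dfsUsed file missing ... will proceed with Du" warnings around here, both concern a cached disk-usage figure that a background thread periodically recomputes. Below is a minimal sketch of that caching pattern in plain Java; it is an illustration only, not Hadoop's CachingGetSpaceUsed, and the class name, constructor and refresh interval are made up for the example.

```java
import java.io.File;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative only: a cached "space used" value refreshed by a background
// thread, in the spirit of the CachingGetSpaceUsed refresh thread whose
// "sleep interrupted" warning appears at shutdown above. Not Hadoop code.
public class CachedSpaceUsed {
    private final File dir;
    private final long refreshIntervalMs;
    private final AtomicLong used = new AtomicLong();
    private final Thread refresher;

    public CachedSpaceUsed(File dir, long refreshIntervalMs) {
        this.dir = dir;
        this.refreshIntervalMs = refreshIntervalMs;
        this.used.set(du(dir));                 // initial "Du"-style scan
        this.refresher = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    Thread.sleep(this.refreshIntervalMs);
                    used.set(du(this.dir));
                } catch (InterruptedException e) {
                    // Mirrors the "sleep interrupted" warning seen at shutdown.
                    Thread.currentThread().interrupt();
                }
            }
        }, "refreshUsed-" + dir);
        this.refresher.setDaemon(true);
        this.refresher.start();
    }

    public long getUsed() { return used.get(); }

    public void close() { refresher.interrupt(); }

    // Walk the directory and sum file lengths (a crude stand-in for `du`).
    private static long du(File f) {
        if (f.isFile()) return f.length();
        long total = 0;
        File[] children = f.listFiles();
        if (children != null) {
            for (File c : children) total += du(c);
        }
        return total;
    }
}
```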
2024-11-25T07:32:59,423 WARN [Thread-2507 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f/data/data3/current/BP-891294055-172.17.0.2-1732519978811/current, will proceed with Du for space computation calculation, 2024-11-25T07:32:59,423 WARN [Thread-2508 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f/data/data4/current/BP-891294055-172.17.0.2-1732519978811/current, will proceed with Du for space computation calculation, 2024-11-25T07:32:59,439 WARN [Thread-2496 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T07:32:59,441 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3a54aef56e260e with lease ID 0x4fa5ca2bc4789d07: Processing first storage report for DS-372ea0c5-72bd-4e45-aa0f-f2f6a813d954 from datanode DatanodeRegistration(127.0.0.1:36327, datanodeUuid=9d8ccbe8-796a-4adc-81f0-f20febddf88e, infoPort=42249, infoSecurePort=0, ipcPort=46755, storageInfo=lv=-57;cid=testClusterID;nsid=443486215;c=1732519978811) 2024-11-25T07:32:59,441 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3a54aef56e260e with lease ID 0x4fa5ca2bc4789d07: from storage DS-372ea0c5-72bd-4e45-aa0f-f2f6a813d954 node DatanodeRegistration(127.0.0.1:36327, datanodeUuid=9d8ccbe8-796a-4adc-81f0-f20febddf88e, infoPort=42249, infoSecurePort=0, ipcPort=46755, storageInfo=lv=-57;cid=testClusterID;nsid=443486215;c=1732519978811), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:32:59,441 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3a54aef56e260e with lease ID 0x4fa5ca2bc4789d07: Processing first storage report for DS-bc8a321e-8528-4ec5-b4c3-e73d06c7e6f2 from datanode DatanodeRegistration(127.0.0.1:36327, datanodeUuid=9d8ccbe8-796a-4adc-81f0-f20febddf88e, infoPort=42249, infoSecurePort=0, ipcPort=46755, storageInfo=lv=-57;cid=testClusterID;nsid=443486215;c=1732519978811) 2024-11-25T07:32:59,441 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3a54aef56e260e with lease ID 0x4fa5ca2bc4789d07: from storage DS-bc8a321e-8528-4ec5-b4c3-e73d06c7e6f2 node DatanodeRegistration(127.0.0.1:36327, datanodeUuid=9d8ccbe8-796a-4adc-81f0-f20febddf88e, infoPort=42249, infoSecurePort=0, ipcPort=46755, storageInfo=lv=-57;cid=testClusterID;nsid=443486215;c=1732519978811), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T07:32:59,442 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e 2024-11-25T07:32:59,444 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f/zookeeper_0, clientPort=64966, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T07:32:59,445 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64966 2024-11-25T07:32:59,445 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:59,446 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:59,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:32:59,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741825_1001 (size=7) 2024-11-25T07:32:59,455 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89 with version=8 2024-11-25T07:32:59,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34173/user/jenkins/test-data/129092fd-68f8-569c-ec91-d1d9901a81c6/hbase-staging 2024-11-25T07:32:59,456 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:32:59,456 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:59,456 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:59,457 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:32:59,457 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:59,457 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:32:59,457 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T07:32:59,457 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:32:59,457 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37987 2024-11-25T07:32:59,458 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37987 connecting to ZooKeeper ensemble=127.0.0.1:64966 2024-11-25T07:32:59,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379870x0, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:32:59,465 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37987-0x1014e0ad4430000 connected 2024-11-25T07:32:59,477 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:59,478 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:59,479 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:32:59,479 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89, hbase.cluster.distributed=false 2024-11-25T07:32:59,481 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:32:59,481 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37987 2024-11-25T07:32:59,482 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37987 2024-11-25T07:32:59,483 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37987 2024-11-25T07:32:59,483 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37987 2024-11-25T07:32:59,483 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37987 2024-11-25T07:32:59,497 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5eb3d201e8c9:0 server-side Connection retries=45 2024-11-25T07:32:59,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:59,498 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:59,498 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T07:32:59,498 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T07:32:59,498 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T07:32:59,498 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T07:32:59,498 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T07:32:59,498 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39981 2024-11-25T07:32:59,499 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39981 connecting to ZooKeeper ensemble=127.0.0.1:64966 2024-11-25T07:32:59,500 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:59,501 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:32:59,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:399810x0, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T07:32:59,508 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39981-0x1014e0ad4430001 connected 2024-11-25T07:32:59,508 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:32:59,509 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T07:32:59,509 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T07:32:59,510 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T07:32:59,510 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T07:32:59,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39981 2024-11-25T07:32:59,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39981 2024-11-25T07:32:59,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39981 2024-11-25T07:32:59,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39981 2024-11-25T07:32:59,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39981 
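The RpcExecutor lines above spell out the call-queue shape for this region server: a LinkedBlockingQueue per queue, maxQueueLength=30, and a fixed handler count per queue. Below is a minimal sketch of that arrangement, assuming a stand-in Call type; it illustrates the logged parameters and is not HBase's RpcExecutor.

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Minimal sketch of the configuration described by the RpcExecutor lines above:
// one bounded FIFO call queue drained by a fixed number of handler threads.
// Illustration only; the Call type is a stand-in, not an HBase class.
public class FifoCallQueue {
    public interface Call { void run(); }

    private final BlockingQueue<Call> queue;

    public FifoCallQueue(int maxQueueLength, int handlerCount, String threadPrefix) {
        this.queue = new LinkedBlockingQueue<>(maxQueueLength);   // maxQueueLength=30 in the log
        for (int i = 0; i < handlerCount; i++) {                  // handlerCount=3 in the log
            Thread handler = new Thread(() -> {
                try {
                    while (true) {
                        queue.take().run();                       // FIFO dispatch
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }, threadPrefix + ".handler-" + i);
            handler.setDaemon(true);
            handler.start();
        }
    }

    // Returns false when the queue is full, i.e. the call is rejected.
    public boolean dispatch(Call call) {
        return queue.offer(call);
    }
}
```

For priority.RWQ.Fifo, which the log shows with numCallQueues=2 (one write queue with one handler, one read queue with two), the same idea is applied once per queue, with handlers pinned to either the read or the write queue.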
2024-11-25T07:32:59,522 DEBUG [M:0;5eb3d201e8c9:37987 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5eb3d201e8c9:37987 2024-11-25T07:32:59,523 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5eb3d201e8c9,37987,1732519979456 2024-11-25T07:32:59,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:32:59,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:32:59,535 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5eb3d201e8c9,37987,1732519979456 2024-11-25T07:32:59,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T07:32:59,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:59,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:59,547 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T07:32:59,547 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5eb3d201e8c9,37987,1732519979456 from backup master directory 2024-11-25T07:32:59,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5eb3d201e8c9,37987,1732519979456 2024-11-25T07:32:59,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:32:59,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T07:32:59,561 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
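The ZKWatcher and ActiveMasterManager entries above trace the usual startup handshake: the master registers an ephemeral znode under /hbase/backup-masters, sets a watch on /hbase/master before that znode exists, and later deletes its backup-masters entry once it becomes active. Below is a minimal sketch of that register-and-watch pattern using the stock ZooKeeper client; the quorum address, session timeout and server name are copied from the log, while everything else (including the assumption that the parent znodes already exist) is illustrative and not ActiveMasterManager's code.

```java
import java.nio.charset.StandardCharsets;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Minimal sketch (not ActiveMasterManager): register an ephemeral znode under
// /hbase/backup-masters, then watch /hbase/master. Assumes /hbase and
// /hbase/backup-masters already exist, as they do in the test cluster above.
public class BackupMasterRegistration {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64966", 30000, (WatchedEvent e) -> {
            if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();

        String serverName = "5eb3d201e8c9,37987,1732519979456";
        // Ephemeral: the znode disappears if this session dies (crash recovery).
        zk.create("/hbase/backup-masters/" + serverName,
                  serverName.getBytes(StandardCharsets.UTF_8),
                  ZooDefs.Ids.OPEN_ACL_UNSAFE,
                  CreateMode.EPHEMERAL);

        // Watch /hbase/master even though it may not exist yet, as the
        // "Set watcher on znode that does not yet exist" lines do.
        zk.exists("/hbase/master", event ->
            System.out.println("master znode event: " + event.getType()));

        Thread.sleep(Long.MAX_VALUE);   // keep the session (and watches) alive
    }
}
```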
2024-11-25T07:32:59,561 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5eb3d201e8c9,37987,1732519979456
2024-11-25T07:32:59,564 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/hbase.id] with ID: 77fcc343-5067-46c0-b2aa-b92b8322cf33
2024-11-25T07:32:59,564 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/.tmp/hbase.id
2024-11-25T07:32:59,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741826_1002 (size=42)
2024-11-25T07:32:59,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741826_1002 (size=42)
2024-11-25T07:32:59,570 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/.tmp/hbase.id]:[hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/hbase.id]
2024-11-25T07:32:59,579 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-25T07:32:59,579 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-25T07:32:59,580 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
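The three FSUtils entries above describe how the cluster ID file is published: write the ID to a temporary path under .tmp, then rename it onto hbase.id so readers never observe a half-written file. Below is a minimal sketch of that write-then-rename step using the stock Hadoop FileSystem API; the root directory and ID are copied from the log, and this is not HBase's FSUtils implementation.

```java
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch of the cluster ID publication traced above: write the ID to a
// temporary file, then rename it into place so readers never see a partial
// file. Root dir and ID are taken from the log; not HBase's FSUtils.
public class WriteClusterId {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path root = new Path("hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89");
        FileSystem fs = FileSystem.get(new URI(root.toString()), conf);

        Path tmp = new Path(root, ".tmp/hbase.id");
        Path target = new Path(root, "hbase.id");
        String clusterId = "77fcc343-5067-46c0-b2aa-b92b8322cf33";

        // 1. Write to the temporary location.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // 2. Move it to its target location in a single rename.
        if (!fs.rename(tmp, target)) {
            throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
        }
    }
}
```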
2024-11-25T07:32:59,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:59,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:59,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:32:59,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741827_1003 (size=196) 2024-11-25T07:32:59,595 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T07:32:59,596 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T07:32:59,596 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:32:59,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:32:59,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741828_1004 (size=1189) 2024-11-25T07:32:59,603 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store 2024-11-25T07:32:59,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:32:59,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741829_1005 (size=34) 2024-11-25T07:32:59,608 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:32:59,608 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:32:59,608 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:32:59,608 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:32:59,608 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:32:59,608 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:32:59,608 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
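The master:store descriptor printed above is the same kind of schema that the public HBase client builders express. As a hedged illustration only (the master's local store region is created internally, not through the client API, and the table name below is a stand-in), the 'info' and 'proc' families from the log map onto builder calls roughly as follows:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    // "demo:store" is a stand-in table name; the descriptor is only built in memory here.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
        // 'info': 3 versions, ROWCOL bloom, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc': single version, ROW bloom, no encoding, 64 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build())
        .build();
    System.out.println(td);
  }
}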
2024-11-25T07:32:59,608 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519979608Disabling compacts and flushes for region at 1732519979608Disabling writes for close at 1732519979608Writing region close event to WAL at 1732519979608Closed at 1732519979608 2024-11-25T07:32:59,609 WARN [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/.initializing 2024-11-25T07:32:59,609 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/WALs/5eb3d201e8c9,37987,1732519979456 2024-11-25T07:32:59,611 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C37987%2C1732519979456, suffix=, logDir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/WALs/5eb3d201e8c9,37987,1732519979456, archiveDir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/oldWALs, maxLogs=10 2024-11-25T07:32:59,611 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C37987%2C1732519979456.1732519979611 2024-11-25T07:32:59,615 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/WALs/5eb3d201e8c9,37987,1732519979456/5eb3d201e8c9%2C37987%2C1732519979456.1732519979611 2024-11-25T07:32:59,616 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36271:36271),(127.0.0.1/127.0.0.1:42249:42249)] 2024-11-25T07:32:59,617 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:32:59,618 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:32:59,618 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,618 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,619 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,620 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T07:32:59,620 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:59,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:59,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,622 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T07:32:59,622 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:59,622 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:32:59,622 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,623 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T07:32:59,623 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:59,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:32:59,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T07:32:59,625 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:59,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T07:32:59,625 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,626 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,626 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,627 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,627 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,627 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T07:32:59,628 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T07:32:59,630 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:32:59,630 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805594, jitterRate=0.024366453289985657}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T07:32:59,631 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732519979618Initializing all the Stores at 1732519979618Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519979619 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519979619Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519979619Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519979619Cleaning up temporary data from old regions at 1732519979627 (+8 ms)Region opened successfully at 1732519979631 (+4 ms) 2024-11-25T07:32:59,631 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T07:32:59,633 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e4b5c18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:32:59,634 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T07:32:59,634 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T07:32:59,634 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T07:32:59,634 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T07:32:59,635 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T07:32:59,635 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T07:32:59,635 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T07:32:59,638 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T07:32:59,638 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T07:32:59,642 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T07:32:59,642 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T07:32:59,642 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T07:32:59,645 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T07:32:59,645 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T07:32:59,647 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T07:32:59,648 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T07:32:59,649 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T07:32:59,652 DEBUG 
[master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T07:32:59,654 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T07:32:59,655 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T07:32:59,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:32:59,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T07:32:59,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:59,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:59,656 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5eb3d201e8c9,37987,1732519979456, sessionid=0x1014e0ad4430000, setting cluster-up flag (Was=false) 2024-11-25T07:32:59,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:59,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:59,664 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T07:32:59,665 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,37987,1732519979456 2024-11-25T07:32:59,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:59,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:32:59,674 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T07:32:59,675 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5eb3d201e8c9,37987,1732519979456 2024-11-25T07:32:59,675 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T07:32:59,677 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T07:32:59,677 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T07:32:59,677 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T07:32:59,677 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5eb3d201e8c9,37987,1732519979456 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T07:32:59,678 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:32:59,678 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:32:59,678 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:32:59,678 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=5, maxPoolSize=5 2024-11-25T07:32:59,678 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5eb3d201e8c9:0, corePoolSize=10, maxPoolSize=10 2024-11-25T07:32:59,678 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,678 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:32:59,678 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5eb3d201e8c9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T07:32:59,679 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732520009679 2024-11-25T07:32:59,679 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T07:32:59,679 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T07:32:59,679 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T07:32:59,679 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T07:32:59,679 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T07:32:59,679 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T07:32:59,680 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:59,680 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:32:59,680 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T07:32:59,680 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T07:32:59,680 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T07:32:59,680 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T07:32:59,680 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T07:32:59,680 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T07:32:59,681 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:59,681 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T07:32:59,681 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519979680,5,FailOnTimeoutGroup] 2024-11-25T07:32:59,682 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519979681,5,FailOnTimeoutGroup] 2024-11-25T07:32:59,682 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:59,682 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T07:32:59,682 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:59,682 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
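Several of the lines above register ScheduledChore instances (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner) with a ChoreService that runs each of them at a fixed period until a Stoppable says otherwise. Below is a minimal, hedged sketch of that pattern; ScheduledChore and ChoreService are internal HBase classes, so the constructor signatures shown are from memory and may differ between versions.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Simple Stoppable the chore consults before each run.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // A chore that "cleans" something every 600 ms (the log schedules LogsCleaner every 600000 ms).
    ScheduledChore cleaner = new ScheduledChore("DemoLogsCleaner", stopper, 600) {
      @Override protected void chore() {
        System.out.println("cleaning old WALs (demo)");
      }
    };

    ChoreService service = new ChoreService("demo-master");
    service.scheduleChore(cleaner);

    Thread.sleep(2000);       // let the chore fire a few times
    stopper.stop("demo done");
    service.shutdown();
  }
}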
2024-11-25T07:32:59,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:32:59,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741831_1007 (size=1321) 2024-11-25T07:32:59,687 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T07:32:59,687 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89 2024-11-25T07:32:59,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:32:59,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741832_1008 (size=32) 2024-11-25T07:32:59,692 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:32:59,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:32:59,694 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:32:59,694 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:59,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:59,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:32:59,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:32:59,695 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:59,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:59,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:32:59,696 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:32:59,696 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:59,697 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:59,697 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:32:59,698 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:32:59,698 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:32:59,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:32:59,698 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:32:59,699 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/1588230740 2024-11-25T07:32:59,699 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/1588230740 2024-11-25T07:32:59,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:32:59,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:32:59,700 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
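The CompactionConfiguration lines repeat the same knobs for every family: candidate sets contain 3 to 10 files, and a set is only acceptable when each file is at most 1.2 times ('ratio') the combined size of the other files in the set. The FlushLargeStoresPolicy lines are plain arithmetic on the same log: with no per-family lower bound configured, the region's memstore flush size is divided by the number of families, which gives 134217728 / 4 = 32.0 M for master:store earlier and the reported 16.0 M for the four hbase:meta families here. The snippet below is a simplified, hedged rendering of the ratio test only; the real ExploringCompactionPolicy additionally scores competing candidate sets and applies the off-peak ratio.

public class RatioCompactionSketch {
  /** True when every file is at most `ratio` times the sum of the other files in the set. */
  static boolean filesInRatio(long[] sizes, double ratio) {
    long total = 0;
    for (long s : sizes) total += s;
    for (long s : sizes) {
      if (s > ratio * (total - s)) return false;
    }
    return true;
  }

  static boolean isValidSelection(long[] sizes, int minFiles, int maxFiles, double ratio) {
    return sizes.length >= minFiles && sizes.length <= maxFiles && filesInRatio(sizes, ratio);
  }

  public static void main(String[] args) {
    double ratio = 1.2;        // logged ratio
    int min = 3, max = 10;     // logged minFilesToCompact / maxFilesToCompact
    long MB = 1024L * 1024L;
    // Three similarly sized files pass the ratio test ...
    System.out.println(isValidSelection(new long[] {10 * MB, 12 * MB, 11 * MB}, min, max, ratio)); // true
    // ... but one dominant file fails it: 100 MB > 1.2 * (10 MB + 12 MB).
    System.out.println(isValidSelection(new long[] {100 * MB, 10 * MB, 12 * MB}, min, max, ratio)); // false
  }
}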
2024-11-25T07:32:59,701 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:32:59,702 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T07:32:59,703 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777139, jitterRate=-0.011817052960395813}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:32:59,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732519979692Initializing all the Stores at 1732519979693 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519979693Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519979693Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519979693Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519979693Cleaning up temporary data from old regions at 1732519979700 (+7 ms)Region opened successfully at 1732519979703 (+3 ms) 2024-11-25T07:32:59,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:32:59,703 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:32:59,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:32:59,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:32:59,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:32:59,704 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:32:59,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519979703Disabling compacts and flushes for region at 1732519979703Disabling writes for close at 1732519979703Writing region 
close event to WAL at 1732519979704 (+1 ms)Closed at 1732519979704 2024-11-25T07:32:59,705 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:32:59,705 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T07:32:59,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T07:32:59,706 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:32:59,707 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T07:32:59,713 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(746): ClusterId : 77fcc343-5067-46c0-b2aa-b92b8322cf33 2024-11-25T07:32:59,713 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T07:32:59,714 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T07:32:59,714 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T07:32:59,716 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T07:32:59,716 DEBUG [RS:0;5eb3d201e8c9:39981 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51caad3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5eb3d201e8c9/172.17.0.2:0 2024-11-25T07:32:59,728 DEBUG [RS:0;5eb3d201e8c9:39981 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5eb3d201e8c9:39981 2024-11-25T07:32:59,728 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T07:32:59,728 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T07:32:59,728 DEBUG [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(832): About to register with Master. 
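The two "Opened ... SteppingSplitPolicy" lines (for master:store earlier and for hbase:meta above) each report a desiredMaxFileSize together with a jitterRate. Reading the numbers back, both are consistent with a common base of 786432 bytes (768 KB) adjusted by a truncated jitter delta: 786432 + trunc(786432 × 0.024366...) = 805594 and 786432 + trunc(786432 × −0.011817...) = 777139. The check below just replays that arithmetic; the 786432-byte base and the truncate-toward-zero step are inferred from the logged values, not taken from the source.

public class SplitJitterCheck {
  public static void main(String[] args) {
    long base = 786_432L; // 768 KB, inferred from the two logged values
    double[] jitterRates = {0.024366453289985657, -0.011817052960395813};
    long[] logged = {805_594L, 777_139L};
    for (int i = 0; i < jitterRates.length; i++) {
      // Truncate the jitter delta toward zero, then add it to the base.
      long expected = base + (long) (base * jitterRates[i]);
      System.out.printf("jitter %+f -> expected %d, logged %d%n",
          jitterRates[i], expected, logged[i]);
    }
  }
}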
2024-11-25T07:32:59,729 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(2659): reportForDuty to master=5eb3d201e8c9,37987,1732519979456 with port=39981, startcode=1732519979497 2024-11-25T07:32:59,729 DEBUG [RS:0;5eb3d201e8c9:39981 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T07:32:59,731 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33959, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T07:32:59,731 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37987 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5eb3d201e8c9,39981,1732519979497 2024-11-25T07:32:59,731 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37987 {}] master.ServerManager(517): Registering regionserver=5eb3d201e8c9,39981,1732519979497 2024-11-25T07:32:59,732 DEBUG [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89 2024-11-25T07:32:59,732 DEBUG [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34639 2024-11-25T07:32:59,732 DEBUG [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T07:32:59,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:32:59,734 DEBUG [RS:0;5eb3d201e8c9:39981 {}] zookeeper.ZKUtil(111): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5eb3d201e8c9,39981,1732519979497 2024-11-25T07:32:59,734 WARN [RS:0;5eb3d201e8c9:39981 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T07:32:59,734 INFO [RS:0;5eb3d201e8c9:39981 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:32:59,735 DEBUG [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/WALs/5eb3d201e8c9,39981,1732519979497 2024-11-25T07:32:59,735 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5eb3d201e8c9,39981,1732519979497] 2024-11-25T07:32:59,737 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T07:32:59,739 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T07:32:59,739 INFO [RS:0;5eb3d201e8c9:39981 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T07:32:59,739 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
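Two of the region server sizing lines above are easy to sanity-check: the global memstore low water mark (836 M) is exactly 95% of the global limit (880 M), and the compaction throughput controller is bounded between 50 MB/s and 100 MB/s. The snippet below is a hedged guess at how a pressure-aware controller might interpolate between those two logged bounds; the real PressureAwareCompactionThroughputController also handles off-peak hours and a tuning period, and its exact formula is not taken from the source.

public class CompactionThroughputSketch {
  /**
   * Hypothetical interpolation: use the lower bound at zero flush pressure,
   * approach the higher bound as pressure grows, and lift the limit once
   * pressure reaches 1.0. Bounds are the ones reported in the log.
   */
  static double maxThroughputBytesPerSec(double pressure) {
    double lower = 50.0 * 1024 * 1024;   // 50 MB/s lower bound (logged)
    double higher = 100.0 * 1024 * 1024; // 100 MB/s higher bound (logged)
    if (pressure >= 1.0) return Double.MAX_VALUE; // assumed: no throttling under extreme pressure
    return lower + (higher - lower) * Math.max(0.0, pressure);
  }

  public static void main(String[] args) {
    for (double p : new double[] {0.0, 0.5, 0.9}) {
      System.out.printf("pressure=%.1f -> %.0f MB/s%n", p,
          maxThroughputBytesPerSec(p) / (1024 * 1024));
    }
  }
}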
2024-11-25T07:32:59,739 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T07:32:59,740 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T07:32:59,740 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=2, maxPoolSize=2 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5eb3d201e8c9:0, corePoolSize=1, maxPoolSize=1 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:32:59,740 DEBUG [RS:0;5eb3d201e8c9:39981 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5eb3d201e8c9:0, corePoolSize=3, maxPoolSize=3 2024-11-25T07:32:59,741 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
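The executor lines above (and the earlier RemoteProcedureDispatcher line with coreThreads=3, allowCoreThreadTimeOut=true) all describe ordinary bounded thread pools named per operation type. As a plain-JDK illustration of what those corePoolSize/maxPoolSize settings mean (this is not HBase's own ExecutorService wrapper), a comparable single-worker pool can be built like this:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class OpenRegionPoolSketch {
  public static void main(String[] args) {
    // corePoolSize=1, maxPoolSize=1: one dedicated worker; extra tasks wait in the queue.
    ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
        1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
        r -> new Thread(r, "RS_OPEN_REGION-demo"));
    // Mirrors the dispatcher's allowCoreThreadTimeOut=true: idle core threads are reclaimed.
    openRegionPool.allowCoreThreadTimeOut(true);

    openRegionPool.submit(() -> System.out.println("open region task (demo)"));
    openRegionPool.shutdown();
  }
}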
2024-11-25T07:32:59,741 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:59,741 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:59,741 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:59,741 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:59,741 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,39981,1732519979497-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:32:59,755 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T07:32:59,755 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,39981,1732519979497-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:59,755 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:59,755 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.Replication(171): 5eb3d201e8c9,39981,1732519979497 started 2024-11-25T07:32:59,768 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:32:59,769 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(1482): Serving as 5eb3d201e8c9,39981,1732519979497, RpcServer on 5eb3d201e8c9/172.17.0.2:39981, sessionid=0x1014e0ad4430001 2024-11-25T07:32:59,769 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T07:32:59,769 DEBUG [RS:0;5eb3d201e8c9:39981 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5eb3d201e8c9,39981,1732519979497 2024-11-25T07:32:59,769 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,39981,1732519979497' 2024-11-25T07:32:59,769 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T07:32:59,769 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T07:32:59,770 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T07:32:59,770 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T07:32:59,770 DEBUG [RS:0;5eb3d201e8c9:39981 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5eb3d201e8c9,39981,1732519979497 2024-11-25T07:32:59,770 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5eb3d201e8c9,39981,1732519979497' 2024-11-25T07:32:59,770 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T07:32:59,770 DEBUG 
[RS:0;5eb3d201e8c9:39981 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T07:32:59,770 DEBUG [RS:0;5eb3d201e8c9:39981 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T07:32:59,770 INFO [RS:0;5eb3d201e8c9:39981 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T07:32:59,770 INFO [RS:0;5eb3d201e8c9:39981 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T07:32:59,857 WARN [5eb3d201e8c9:37987 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T07:32:59,872 INFO [RS:0;5eb3d201e8c9:39981 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C39981%2C1732519979497, suffix=, logDir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/WALs/5eb3d201e8c9,39981,1732519979497, archiveDir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/oldWALs, maxLogs=32 2024-11-25T07:32:59,872 INFO [RS:0;5eb3d201e8c9:39981 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C39981%2C1732519979497.1732519979872 2024-11-25T07:32:59,877 INFO [RS:0;5eb3d201e8c9:39981 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/WALs/5eb3d201e8c9,39981,1732519979497/5eb3d201e8c9%2C39981%2C1732519979497.1732519979872 2024-11-25T07:32:59,878 DEBUG [RS:0;5eb3d201e8c9:39981 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36271:36271),(127.0.0.1/127.0.0.1:42249:42249)] 2024-11-25T07:32:59,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:32:59,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T07:33:00,107 DEBUG [5eb3d201e8c9:37987 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T07:33:00,108 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5eb3d201e8c9,39981,1732519979497 2024-11-25T07:33:00,109 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,39981,1732519979497, state=OPENING 2024-11-25T07:33:00,112 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T07:33:00,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:33:00,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:33:00,113 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T07:33:00,113 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:33:00,113 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:33:00,113 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,39981,1732519979497}] 2024-11-25T07:33:00,266 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T07:33:00,268 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54277, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T07:33:00,271 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T07:33:00,271 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:33:00,272 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5eb3d201e8c9%2C39981%2C1732519979497.meta, suffix=.meta, logDir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/WALs/5eb3d201e8c9,39981,1732519979497, archiveDir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/oldWALs, maxLogs=32 2024-11-25T07:33:00,273 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 5eb3d201e8c9%2C39981%2C1732519979497.meta.1732519980273.meta 2024-11-25T07:33:00,281 INFO 
[RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/WALs/5eb3d201e8c9,39981,1732519979497/5eb3d201e8c9%2C39981%2C1732519979497.meta.1732519980273.meta 2024-11-25T07:33:00,284 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42249:42249),(127.0.0.1/127.0.0.1:36271:36271)] 2024-11-25T07:33:00,285 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T07:33:00,285 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T07:33:00,285 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T07:33:00,285 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-25T07:33:00,285 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T07:33:00,285 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T07:33:00,285 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T07:33:00,286 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T07:33:00,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T07:33:00,289 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T07:33:00,289 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:33:00,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:33:00,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T07:33:00,290 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T07:33:00,290 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:33:00,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:33:00,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T07:33:00,291 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T07:33:00,291 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:33:00,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:33:00,291 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T07:33:00,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T07:33:00,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T07:33:00,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T07:33:00,292 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T07:33:00,293 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/1588230740 2024-11-25T07:33:00,293 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/1588230740 2024-11-25T07:33:00,294 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T07:33:00,294 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T07:33:00,295 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T07:33:00,296 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T07:33:00,297 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750018, jitterRate=-0.046302855014801025}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T07:33:00,297 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T07:33:00,297 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732519980286Writing region info on filesystem at 1732519980286Initializing all the Stores at 1732519980286Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519980286Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519980288 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732519980288Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732519980288Cleaning up temporary data from old regions at 1732519980294 (+6 ms)Running coprocessor post-open hooks at 1732519980297 (+3 ms)Region opened successfully at 1732519980297 2024-11-25T07:33:00,298 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732519980265 2024-11-25T07:33:00,300 DEBUG [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T07:33:00,300 INFO [RS_OPEN_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T07:33:00,300 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=5eb3d201e8c9,39981,1732519979497 2024-11-25T07:33:00,301 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5eb3d201e8c9,39981,1732519979497, state=OPEN 2024-11-25T07:33:00,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:33:00,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T07:33:00,309 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:33:00,309 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T07:33:00,309 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,39981,1732519979497 2024-11-25T07:33:00,311 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T07:33:00,311 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5eb3d201e8c9,39981,1732519979497 in 196 msec 2024-11-25T07:33:00,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T07:33:00,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-11-25T07:33:00,313 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T07:33:00,313 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T07:33:00,314 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:33:00,315 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,39981,1732519979497, seqNum=-1] 2024-11-25T07:33:00,315 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:33:00,316 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53919, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:33:00,320 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 642 msec 2024-11-25T07:33:00,320 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732519980320, completionTime=-1 2024-11-25T07:33:00,320 INFO 
[master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T07:33:00,320 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T07:33:00,322 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T07:33:00,322 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732520040322 2024-11-25T07:33:00,322 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732520100322 2024-11-25T07:33:00,322 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-25T07:33:00,323 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37987,1732519979456-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T07:33:00,323 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37987,1732519979456-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:33:00,323 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37987,1732519979456-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:33:00,323 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5eb3d201e8c9:37987, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:33:00,323 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T07:33:00,323 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T07:33:00,324 DEBUG [master/5eb3d201e8c9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T07:33:00,326 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.765sec 2024-11-25T07:33:00,326 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T07:33:00,326 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T07:33:00,326 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T07:33:00,326 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-25T07:33:00,326 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T07:33:00,326 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37987,1732519979456-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T07:33:00,326 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37987,1732519979456-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T07:33:00,328 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T07:33:00,328 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T07:33:00,328 INFO [master/5eb3d201e8c9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5eb3d201e8c9,37987,1732519979456-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T07:33:00,413 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ce8374, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:33:00,413 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5eb3d201e8c9,37987,-1 for getting cluster id 2024-11-25T07:33:00,413 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T07:33:00,415 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '77fcc343-5067-46c0-b2aa-b92b8322cf33' 2024-11-25T07:33:00,415 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T07:33:00,415 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "77fcc343-5067-46c0-b2aa-b92b8322cf33" 2024-11-25T07:33:00,415 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e0eef60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:33:00,415 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5eb3d201e8c9,37987,-1] 2024-11-25T07:33:00,416 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T07:33:00,416 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:33:00,417 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34114, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T07:33:00,417 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4041b4de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T07:33:00,418 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T07:33:00,418 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5eb3d201e8c9,39981,1732519979497, seqNum=-1] 2024-11-25T07:33:00,419 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T07:33:00,419 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47002, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T07:33:00,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5eb3d201e8c9,37987,1732519979456 2024-11-25T07:33:00,421 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T07:33:00,423 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T07:33:00,423 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T07:33:00,425 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/WALs/test.com,8080,1, archiveDir=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/oldWALs, maxLogs=32 2024-11-25T07:33:00,426 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732519980426 2024-11-25T07:33:00,430 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/WALs/test.com,8080,1/test.com%2C8080%2C1.1732519980426 2024-11-25T07:33:00,431 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36271:36271),(127.0.0.1/127.0.0.1:42249:42249)] 2024-11-25T07:33:00,431 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732519980431 2024-11-25T07:33:00,435 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,435 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,436 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,436 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,436 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,436 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/WALs/test.com,8080,1/test.com%2C8080%2C1.1732519980426 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/WALs/test.com,8080,1/test.com%2C8080%2C1.1732519980431 2024-11-25T07:33:00,436 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36271:36271),(127.0.0.1/127.0.0.1:42249:42249)] 2024-11-25T07:33:00,437 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/WALs/test.com,8080,1/test.com%2C8080%2C1.1732519980426 is not closed yet, will try archiving it next time 2024-11-25T07:33:00,437 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,437 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,437 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,437 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741835_1011 (size=93) 2024-11-25T07:33:00,437 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741835_1011 (size=93) 2024-11-25T07:33:00,438 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/WALs/test.com,8080,1/test.com%2C8080%2C1.1732519980426 to hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/oldWALs/test.com%2C8080%2C1.1732519980426 2024-11-25T07:33:00,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741836_1012 (size=93) 2024-11-25T07:33:00,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741836_1012 (size=93) 2024-11-25T07:33:00,441 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/oldWALs 2024-11-25T07:33:00,441 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732519980431) 2024-11-25T07:33:00,441 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T07:33:00,441 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T07:33:00,441 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:33:00,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:33:00,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:33:00,441 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-25T07:33:00,441 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T07:33:00,441 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=816752116, stopped=false 2024-11-25T07:33:00,441 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5eb3d201e8c9,37987,1732519979456 2024-11-25T07:33:00,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:33:00,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T07:33:00,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:33:00,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:33:00,444 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:33:00,445 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T07:33:00,445 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:33:00,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:33:00,445 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5eb3d201e8c9,39981,1732519979497' ***** 2024-11-25T07:33:00,445 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T07:33:00,445 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:33:00,445 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T07:33:00,445 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T07:33:00,445 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T07:33:00,445 INFO [RS:0;5eb3d201e8c9:39981 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T07:33:00,445 INFO [RS:0;5eb3d201e8c9:39981 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T07:33:00,446 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(959): stopping server 5eb3d201e8c9,39981,1732519979497 2024-11-25T07:33:00,446 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:33:00,446 INFO [RS:0;5eb3d201e8c9:39981 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5eb3d201e8c9:39981. 
2024-11-25T07:33:00,446 DEBUG [RS:0;5eb3d201e8c9:39981 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T07:33:00,446 DEBUG [RS:0;5eb3d201e8c9:39981 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:33:00,446 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T07:33:00,446 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T07:33:00,446 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T07:33:00,446 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T07:33:00,446 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-25T07:33:00,446 DEBUG [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-25T07:33:00,446 DEBUG [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T07:33:00,446 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T07:33:00,446 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T07:33:00,446 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T07:33:00,446 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T07:33:00,446 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T07:33:00,446 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-25T07:33:00,462 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/1588230740/.tmp/ns/fbf61e39f2524893b2f9bfe4bf14a2cb is 43, key is default/ns:d/1732519980316/Put/seqid=0 2024-11-25T07:33:00,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741837_1013 (size=5153) 2024-11-25T07:33:00,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741837_1013 (size=5153) 2024-11-25T07:33:00,466 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/1588230740/.tmp/ns/fbf61e39f2524893b2f9bfe4bf14a2cb 2024-11-25T07:33:00,471 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/1588230740/.tmp/ns/fbf61e39f2524893b2f9bfe4bf14a2cb as hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/1588230740/ns/fbf61e39f2524893b2f9bfe4bf14a2cb 2024-11-25T07:33:00,475 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/1588230740/ns/fbf61e39f2524893b2f9bfe4bf14a2cb, entries=2, sequenceid=6, filesize=5.0 K 2024-11-25T07:33:00,476 INFO 
[RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-25T07:33:00,479 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-25T07:33:00,480 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T07:33:00,480 INFO [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T07:33:00,480 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732519980446Running coprocessor pre-close hooks at 1732519980446Disabling compacts and flushes for region at 1732519980446Disabling writes for close at 1732519980446Obtaining lock to block concurrent updates at 1732519980446Preparing flush snapshotting stores in 1588230740 at 1732519980446Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732519980447 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732519980447Flushing 1588230740/ns: creating writer at 1732519980447Flushing 1588230740/ns: appending metadata at 1732519980461 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732519980461Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@630a87d: reopening flushed file at 1732519980471 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1732519980476 (+5 ms)Writing region close event to WAL at 1732519980477 (+1 ms)Running coprocessor post-close hooks at 1732519980480 (+3 ms)Closed at 1732519980480 2024-11-25T07:33:00,480 DEBUG [RS_CLOSE_META-regionserver/5eb3d201e8c9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T07:33:00,646 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(976): stopping server 5eb3d201e8c9,39981,1732519979497; all regions closed. 
2024-11-25T07:33:00,647 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,647 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,647 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,647 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,647 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741834_1010 (size=1152) 2024-11-25T07:33:00,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741834_1010 (size=1152) 2024-11-25T07:33:00,651 DEBUG [RS:0;5eb3d201e8c9:39981 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/oldWALs 2024-11-25T07:33:00,651 INFO [RS:0;5eb3d201e8c9:39981 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C39981%2C1732519979497.meta:.meta(num 1732519980273) 2024-11-25T07:33:00,651 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,652 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,652 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,652 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,652 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741833_1009 (size=93) 2024-11-25T07:33:00,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741833_1009 (size=93) 2024-11-25T07:33:00,655 DEBUG [RS:0;5eb3d201e8c9:39981 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/oldWALs 2024-11-25T07:33:00,655 INFO [RS:0;5eb3d201e8c9:39981 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 5eb3d201e8c9%2C39981%2C1732519979497:(num 1732519979872) 2024-11-25T07:33:00,655 DEBUG [RS:0;5eb3d201e8c9:39981 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T07:33:00,655 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T07:33:00,656 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:33:00,656 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.ChoreService(370): Chore service for: regionserver/5eb3d201e8c9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-25T07:33:00,656 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:33:00,656 INFO [regionserver/5eb3d201e8c9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T07:33:00,656 INFO [RS:0;5eb3d201e8c9:39981 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39981 2024-11-25T07:33:00,658 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:33:00,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5eb3d201e8c9,39981,1732519979497 2024-11-25T07:33:00,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T07:33:00,659 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5eb3d201e8c9,39981,1732519979497] 2024-11-25T07:33:00,660 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5eb3d201e8c9,39981,1732519979497 already deleted, retry=false 2024-11-25T07:33:00,660 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5eb3d201e8c9,39981,1732519979497 expired; onlineServers=0 2024-11-25T07:33:00,660 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5eb3d201e8c9,37987,1732519979456' ***** 2024-11-25T07:33:00,660 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T07:33:00,660 INFO [M:0;5eb3d201e8c9:37987 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T07:33:00,660 INFO [M:0;5eb3d201e8c9:37987 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T07:33:00,660 DEBUG [M:0;5eb3d201e8c9:37987 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T07:33:00,660 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T07:33:00,660 DEBUG [M:0;5eb3d201e8c9:37987 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T07:33:00,660 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519979680 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.large.0-1732519979680,5,FailOnTimeoutGroup] 2024-11-25T07:33:00,660 DEBUG [master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519979681 {}] cleaner.HFileCleaner(306): Exit Thread[master/5eb3d201e8c9:0:becomeActiveMaster-HFileCleaner.small.0-1732519979681,5,FailOnTimeoutGroup] 2024-11-25T07:33:00,661 INFO [M:0;5eb3d201e8c9:37987 {}] hbase.ChoreService(370): Chore service for: master/5eb3d201e8c9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T07:33:00,661 INFO [M:0;5eb3d201e8c9:37987 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T07:33:00,661 DEBUG [M:0;5eb3d201e8c9:37987 {}] master.HMaster(1795): Stopping service threads 2024-11-25T07:33:00,661 INFO [M:0;5eb3d201e8c9:37987 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T07:33:00,661 INFO [M:0;5eb3d201e8c9:37987 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T07:33:00,661 INFO [M:0;5eb3d201e8c9:37987 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T07:33:00,661 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T07:33:00,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T07:33:00,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T07:33:00,664 DEBUG [M:0;5eb3d201e8c9:37987 {}] zookeeper.ZKUtil(347): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T07:33:00,664 WARN [M:0;5eb3d201e8c9:37987 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T07:33:00,664 INFO [M:0;5eb3d201e8c9:37987 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/.lastflushedseqids 2024-11-25T07:33:00,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741838_1014 (size=99) 2024-11-25T07:33:00,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741838_1014 (size=99) 2024-11-25T07:33:00,669 INFO [M:0;5eb3d201e8c9:37987 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T07:33:00,669 INFO [M:0;5eb3d201e8c9:37987 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T07:33:00,669 DEBUG [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T07:33:00,669 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:33:00,669 DEBUG [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:33:00,669 DEBUG [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T07:33:00,669 DEBUG [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T07:33:00,670 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-25T07:33:00,684 DEBUG [M:0;5eb3d201e8c9:37987 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/11f64a8e62654ce7969522dd461bebd8 is 82, key is hbase:meta,,1/info:regioninfo/1732519980300/Put/seqid=0 2024-11-25T07:33:00,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741839_1015 (size=5672) 2024-11-25T07:33:00,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741839_1015 (size=5672) 2024-11-25T07:33:00,689 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/11f64a8e62654ce7969522dd461bebd8 2024-11-25T07:33:00,706 DEBUG [M:0;5eb3d201e8c9:37987 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72903ac0c5bc47f3a35a1e0d97129705 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732519980319/Put/seqid=0 2024-11-25T07:33:00,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741840_1016 (size=5275) 2024-11-25T07:33:00,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741840_1016 (size=5275) 2024-11-25T07:33:00,711 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72903ac0c5bc47f3a35a1e0d97129705 2024-11-25T07:33:00,729 DEBUG [M:0;5eb3d201e8c9:37987 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5d2092d634884580b65857e9f1ed14d2 is 69, key is 5eb3d201e8c9,39981,1732519979497/rs:state/1732519979731/Put/seqid=0 2024-11-25T07:33:00,733 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741841_1017 (size=5156) 2024-11-25T07:33:00,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741841_1017 (size=5156) 2024-11-25T07:33:00,733 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5d2092d634884580b65857e9f1ed14d2 2024-11-25T07:33:00,750 DEBUG [M:0;5eb3d201e8c9:37987 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ceaa23e485ce44bbab28930bd43c3d39 is 52, key is load_balancer_on/state:d/1732519980422/Put/seqid=0 2024-11-25T07:33:00,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741842_1018 (size=5056) 2024-11-25T07:33:00,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741842_1018 (size=5056) 2024-11-25T07:33:00,755 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ceaa23e485ce44bbab28930bd43c3d39 2024-11-25T07:33:00,759 DEBUG [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/11f64a8e62654ce7969522dd461bebd8 as hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/11f64a8e62654ce7969522dd461bebd8 2024-11-25T07:33:00,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:33:00,759 INFO [RS:0;5eb3d201e8c9:39981 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:33:00,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39981-0x1014e0ad4430001, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:33:00,759 INFO [RS:0;5eb3d201e8c9:39981 {}] regionserver.HRegionServer(1031): Exiting; stopping=5eb3d201e8c9,39981,1732519979497; zookeeper connection closed. 
2024-11-25T07:33:00,759 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@d72f24c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@d72f24c 2024-11-25T07:33:00,760 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T07:33:00,763 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/11f64a8e62654ce7969522dd461bebd8, entries=8, sequenceid=29, filesize=5.5 K 2024-11-25T07:33:00,763 DEBUG [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72903ac0c5bc47f3a35a1e0d97129705 as hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/72903ac0c5bc47f3a35a1e0d97129705 2024-11-25T07:33:00,767 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/72903ac0c5bc47f3a35a1e0d97129705, entries=3, sequenceid=29, filesize=5.2 K 2024-11-25T07:33:00,767 DEBUG [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5d2092d634884580b65857e9f1ed14d2 as hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5d2092d634884580b65857e9f1ed14d2 2024-11-25T07:33:00,771 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5d2092d634884580b65857e9f1ed14d2, entries=1, sequenceid=29, filesize=5.0 K 2024-11-25T07:33:00,771 DEBUG [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ceaa23e485ce44bbab28930bd43c3d39 as hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ceaa23e485ce44bbab28930bd43c3d39 2024-11-25T07:33:00,776 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34639/user/jenkins/test-data/f935e6af-babf-4dec-82c1-8eb22ca3cb89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ceaa23e485ce44bbab28930bd43c3d39, entries=1, sequenceid=29, filesize=4.9 K 2024-11-25T07:33:00,777 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=29, compaction requested=false 2024-11-25T07:33:00,778 INFO [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T07:33:00,778 DEBUG [M:0;5eb3d201e8c9:37987 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732519980669Disabling compacts and flushes for region at 1732519980669Disabling writes for close at 1732519980669Obtaining lock to block concurrent updates at 1732519980670 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732519980670Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732519980670Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732519980670Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732519980671 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732519980684 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732519980684Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732519980693 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732519980706 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732519980706Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732519980715 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732519980729 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732519980729Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732519980737 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732519980750 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732519980750Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19f9796: reopening flushed file at 1732519980758 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69622f45: reopening flushed file at 1732519980763 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59c74632: reopening flushed file at 1732519980767 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7172a196: reopening flushed file at 1732519980771 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=29, compaction requested=false at 1732519980777 (+6 ms)Writing region close event to WAL at 1732519980778 (+1 ms)Closed at 1732519980778 2024-11-25T07:33:00,779 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,779 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,779 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,779 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,779 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T07:33:00,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36327 is added to blk_1073741830_1006 (size=10311) 2024-11-25T07:33:00,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34361 is added to blk_1073741830_1006 (size=10311) 2024-11-25T07:33:00,781 INFO [M:0;5eb3d201e8c9:37987 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-25T07:33:00,781 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T07:33:00,781 INFO [M:0;5eb3d201e8c9:37987 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37987 2024-11-25T07:33:00,781 INFO [M:0;5eb3d201e8c9:37987 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T07:33:00,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:33:00,883 INFO [M:0;5eb3d201e8c9:37987 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T07:33:00,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37987-0x1014e0ad4430000, quorum=127.0.0.1:64966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T07:33:00,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@554afc70{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:33:00,886 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@cbcac8c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:33:00,886 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:33:00,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2629cfe0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:33:00,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5dba136e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/hadoop.log.dir/,STOPPED} 2024-11-25T07:33:00,887 WARN [BP-891294055-172.17.0.2-1732519978811 heartbeating to localhost/127.0.0.1:34639 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:33:00,887 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:33:00,887 WARN [BP-891294055-172.17.0.2-1732519978811 heartbeating to localhost/127.0.0.1:34639 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-891294055-172.17.0.2-1732519978811 (Datanode Uuid 9d8ccbe8-796a-4adc-81f0-f20febddf88e) service to localhost/127.0.0.1:34639 2024-11-25T07:33:00,887 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:33:00,888 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f/data/data3/current/BP-891294055-172.17.0.2-1732519978811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:33:00,888 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f/data/data4/current/BP-891294055-172.17.0.2-1732519978811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:33:00,888 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:33:00,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@186f146f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T07:33:00,890 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@231297c7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:33:00,890 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:33:00,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a7b167c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:33:00,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a86d190{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/hadoop.log.dir/,STOPPED} 2024-11-25T07:33:00,892 WARN [BP-891294055-172.17.0.2-1732519978811 heartbeating to localhost/127.0.0.1:34639 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T07:33:00,892 WARN [BP-891294055-172.17.0.2-1732519978811 heartbeating to localhost/127.0.0.1:34639 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-891294055-172.17.0.2-1732519978811 (Datanode Uuid 76b2160d-98a3-410c-bb97-ec3551e3c600) service to localhost/127.0.0.1:34639 2024-11-25T07:33:00,892 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T07:33:00,892 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T07:33:00,892 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f/data/data1/current/BP-891294055-172.17.0.2-1732519978811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:33:00,893 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/cluster_65c7b312-8e93-1fce-8e40-0e646737ae8f/data/data2/current/BP-891294055-172.17.0.2-1732519978811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T07:33:00,893 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T07:33:00,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@40f3733a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T07:33:00,898 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c3c893{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T07:33:00,898 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T07:33:00,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ed058d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T07:33:00,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@309e84ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34dd8fc9-a0f0-5753-a198-95be1a81ad7e/hadoop.log.dir/,STOPPED} 2024-11-25T07:33:00,904 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T07:33:00,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T07:33:00,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44859,1732519786393/5eb3d201e8c9%2C44859%2C1732519786393.meta.1732519787635.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:33:00,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34285/user/jenkins/test-data/77b8c077-0b67-3de4-845c-5c1d1fd3360a/WALs/5eb3d201e8c9,44807,1732519787746/5eb3d201e8c9%2C44807%2C1732519787746.1732519787946 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T07:33:00,929 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 229) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34639 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34639 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34639 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:34639 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34639 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34639 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34639 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34639 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=541 (was 518) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=71 (was 51) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7910 (was 7920)