2024-12-06 10:09:59,657 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@dc7df28 2024-12-06 10:09:59,674 main DEBUG Took 0.013964 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-06 10:09:59,674 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-06 10:09:59,675 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-06 10:09:59,676 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-06 10:09:59,677 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,685 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-06 10:09:59,701 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,703 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,704 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,705 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,705 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,706 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,707 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,707 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,708 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,708 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,709 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,709 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,710 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,710 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-06 10:09:59,711 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,711 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,712 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,712 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,713 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,713 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,714 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,714 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,715 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,715 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 10:09:59,716 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,716 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-06 10:09:59,718 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 10:09:59,720 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-06 10:09:59,721 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-06 10:09:59,722 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-06 10:09:59,724 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-06 10:09:59,724 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-06 10:09:59,734 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-06 10:09:59,736 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-06 10:09:59,739 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-06 10:09:59,739 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-06 10:09:59,739 main DEBUG createAppenders(={Console}) 2024-12-06 10:09:59,740 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@dc7df28 initialized 2024-12-06 10:09:59,740 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@dc7df28 2024-12-06 10:09:59,741 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@dc7df28 OK. 2024-12-06 10:09:59,742 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-06 10:09:59,742 main DEBUG OutputStream closed 2024-12-06 10:09:59,742 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-06 10:09:59,743 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-06 10:09:59,743 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@316bcf94 OK 2024-12-06 10:09:59,832 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-06 10:09:59,834 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-06 10:09:59,836 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-06 10:09:59,837 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-06 10:09:59,838 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-06 10:09:59,838 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-06 10:09:59,839 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-06 10:09:59,839 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-06 10:09:59,839 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-06 10:09:59,840 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-06 10:09:59,840 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-06 10:09:59,841 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-06 10:09:59,841 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-06 10:09:59,842 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-06 10:09:59,842 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-06 10:09:59,843 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-06 10:09:59,843 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-06 10:09:59,844 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-06 10:09:59,847 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-06 10:09:59,848 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@21fd5faa) with optional ClassLoader: null 2024-12-06 10:09:59,848 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-06 10:09:59,850 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@21fd5faa] started OK. 2024-12-06T10:10:00,147 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6 2024-12-06 10:10:00,151 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-06 10:10:00,151 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
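[Editor's note] A minimal Java sketch of what the configuration loaded above amounts to, using the Log4j 2 core API (PatternLayout/Configurator). This is an illustration only: the run actually loads it from the log4j2.properties file in the hbase-logging tests jar and writes through HBase's own HBaseTestAppender, not programmatic setup.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.layout.PatternLayout;

public class Log4jSetupSketch {
  public static void main(String[] args) {
    // Same conversion pattern the PatternLayout$Builder line above reports;
    // built here only to show the pattern, the real appender comes from the properties file.
    PatternLayout layout = PatternLayout.newBuilder()
        .withPattern("%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")
        .build();
    // Level overrides corresponding to a few of the LoggerConfig entries built above.
    Configurator.setRootLevel(Level.INFO);
    Configurator.setLevel("org.apache.hadoop", Level.WARN);
    Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
    Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
  }
}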
2024-12-06T10:10:00,163 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-06T10:10:00,200 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=454, ProcessCount=11, AvailableMemoryMB=8219 2024-12-06T10:10:00,204 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T10:10:00,208 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a, deleteOnExit=true 2024-12-06T10:10:00,208 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T10:10:00,209 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/test.cache.data in system properties and HBase conf 2024-12-06T10:10:00,210 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T10:10:00,211 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/hadoop.log.dir in system properties and HBase conf 2024-12-06T10:10:00,212 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T10:10:00,212 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T10:10:00,213 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T10:10:00,315 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-06T10:10:00,424 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T10:10:00,428 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:10:00,429 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:10:00,430 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T10:10:00,430 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:10:00,431 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T10:10:00,431 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T10:10:00,432 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:10:00,432 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:10:00,433 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T10:10:00,433 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/nfs.dump.dir in system properties and HBase conf 2024-12-06T10:10:00,434 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/java.io.tmpdir in system properties and HBase conf 2024-12-06T10:10:00,434 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:10:00,435 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T10:10:00,435 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T10:10:01,191 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:10:01,654 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-06T10:10:01,758 INFO [Time-limited test {}] log.Log(170): Logging initialized @3048ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-06T10:10:01,843 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:10:01,913 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:10:01,932 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:10:01,932 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:10:01,934 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:10:01,948 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:10:01,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@88aab13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:10:01,951 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74468826{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:10:02,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5682c4d1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/java.io.tmpdir/jetty-localhost-38525-hadoop-hdfs-3_4_1-tests_jar-_-any-17048882492395967552/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:10:02,174 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ff1a6c1{HTTP/1.1, (http/1.1)}{localhost:38525} 2024-12-06T10:10:02,175 INFO [Time-limited test {}] server.Server(415): Started @3466ms 2024-12-06T10:10:02,202 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:10:02,603 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:10:02,612 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:10:02,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:10:02,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:10:02,614 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:10:02,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2276bd44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:10:02,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b4ce9e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:10:02,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6aad8790{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/java.io.tmpdir/jetty-localhost-39721-hadoop-hdfs-3_4_1-tests_jar-_-any-15661831230881329410/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:10:02,743 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@587d1dca{HTTP/1.1, (http/1.1)}{localhost:39721} 2024-12-06T10:10:02,744 INFO [Time-limited test {}] server.Server(415): Started @4035ms 2024-12-06T10:10:02,811 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:10:03,021 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:10:03,028 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:10:03,031 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:10:03,032 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:10:03,032 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:10:03,035 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4debea22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:10:03,036 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6eb1b261{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:10:03,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@163cfad6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/java.io.tmpdir/jetty-localhost-43329-hadoop-hdfs-3_4_1-tests_jar-_-any-8952972494297737435/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:10:03,201 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f952caa{HTTP/1.1, (http/1.1)}{localhost:43329} 2024-12-06T10:10:03,201 INFO [Time-limited test {}] server.Server(415): Started @4492ms 2024-12-06T10:10:03,204 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
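[Editor's note] For orientation, a hypothetical sketch of the mini-cluster scaffolding that produces a startup sequence like the one logged above, matching the StartMiniClusterOption line (one master, one regionserver, two datanodes, one ZooKeeper server). The real TestLogRolling setup lives in the HBase test sources and may differ in detail.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  public static void main(String[] args) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)   // matches the two DatanodeRegistration block reports in this log
        .numZkServers(1)
        .build();
    TEST_UTIL.startMiniCluster(option);   // brings up HDFS, ZooKeeper, master and regionserver
    try {
      // test body would run against TEST_UTIL here
    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }
}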
2024-12-06T10:10:03,426 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a/dfs/data/data1/current/BP-858773073-172.17.0.2-1733479801314/current, will proceed with Du for space computation calculation, 2024-12-06T10:10:03,429 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a/dfs/data/data2/current/BP-858773073-172.17.0.2-1733479801314/current, will proceed with Du for space computation calculation, 2024-12-06T10:10:03,430 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a/dfs/data/data4/current/BP-858773073-172.17.0.2-1733479801314/current, will proceed with Du for space computation calculation, 2024-12-06T10:10:03,429 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a/dfs/data/data3/current/BP-858773073-172.17.0.2-1733479801314/current, will proceed with Du for space computation calculation, 2024-12-06T10:10:03,495 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T10:10:03,496 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T10:10:03,598 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x920d97957c0a300d with lease ID 0xf301eecb8e69089a: Processing first storage report for DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff from datanode DatanodeRegistration(127.0.0.1:41843, datanodeUuid=16a6a190-58eb-4fa6-829b-64f750470630, infoPort=45777, infoSecurePort=0, ipcPort=40417, storageInfo=lv=-57;cid=testClusterID;nsid=953123071;c=1733479801314) 2024-12-06T10:10:03,600 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x920d97957c0a300d with lease ID 0xf301eecb8e69089a: from storage DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff node DatanodeRegistration(127.0.0.1:41843, datanodeUuid=16a6a190-58eb-4fa6-829b-64f750470630, infoPort=45777, infoSecurePort=0, ipcPort=40417, storageInfo=lv=-57;cid=testClusterID;nsid=953123071;c=1733479801314), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T10:10:03,600 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x920d97957c0a300d with lease ID 0xf301eecb8e69089a: Processing first storage report for DS-a31fb2ad-7b83-4ba1-accf-c9aea1bbfb60 from datanode DatanodeRegistration(127.0.0.1:41843, datanodeUuid=16a6a190-58eb-4fa6-829b-64f750470630, infoPort=45777, infoSecurePort=0, ipcPort=40417, storageInfo=lv=-57;cid=testClusterID;nsid=953123071;c=1733479801314) 2024-12-06T10:10:03,601 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x920d97957c0a300d with lease ID 0xf301eecb8e69089a: from storage DS-a31fb2ad-7b83-4ba1-accf-c9aea1bbfb60 node DatanodeRegistration(127.0.0.1:41843, datanodeUuid=16a6a190-58eb-4fa6-829b-64f750470630, infoPort=45777, infoSecurePort=0, ipcPort=40417, storageInfo=lv=-57;cid=testClusterID;nsid=953123071;c=1733479801314), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:10:03,601 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8791847da59a18cc with lease ID 0xf301eecb8e690899: Processing first storage report for DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9 from datanode DatanodeRegistration(127.0.0.1:42563, datanodeUuid=0b98e0f6-d1f9-4dc9-acf4-5709e9f091c9, infoPort=35965, infoSecurePort=0, ipcPort=39501, storageInfo=lv=-57;cid=testClusterID;nsid=953123071;c=1733479801314) 2024-12-06T10:10:03,601 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8791847da59a18cc with lease ID 0xf301eecb8e690899: from storage DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9 node DatanodeRegistration(127.0.0.1:42563, datanodeUuid=0b98e0f6-d1f9-4dc9-acf4-5709e9f091c9, infoPort=35965, infoSecurePort=0, ipcPort=39501, storageInfo=lv=-57;cid=testClusterID;nsid=953123071;c=1733479801314), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:10:03,602 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8791847da59a18cc with lease ID 0xf301eecb8e690899: Processing first storage report for DS-bad739e8-3699-4f06-b9aa-9e2cee360c44 from datanode DatanodeRegistration(127.0.0.1:42563, datanodeUuid=0b98e0f6-d1f9-4dc9-acf4-5709e9f091c9, infoPort=35965, infoSecurePort=0, ipcPort=39501, storageInfo=lv=-57;cid=testClusterID;nsid=953123071;c=1733479801314) 2024-12-06T10:10:03,602 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x8791847da59a18cc with lease ID 0xf301eecb8e690899: from storage DS-bad739e8-3699-4f06-b9aa-9e2cee360c44 node DatanodeRegistration(127.0.0.1:42563, datanodeUuid=0b98e0f6-d1f9-4dc9-acf4-5709e9f091c9, infoPort=35965, infoSecurePort=0, ipcPort=39501, storageInfo=lv=-57;cid=testClusterID;nsid=953123071;c=1733479801314), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T10:10:03,703 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6 2024-12-06T10:10:03,802 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a/zookeeper_0, clientPort=49614, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T10:10:03,814 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=49614 2024-12-06T10:10:03,828 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:10:03,833 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:10:04,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:10:04,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:10:04,552 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9 with version=8 2024-12-06T10:10:04,552 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/hbase-staging 2024-12-06T10:10:04,679 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-06T10:10:04,959 INFO [Time-limited test {}] client.ConnectionUtils(129): master/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:10:04,979 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:10:04,980 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:10:04,980 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:10:04,980 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:10:04,980 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:10:05,130 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:10:05,199 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-06T10:10:05,208 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-06T10:10:05,213 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:10:05,245 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 28519 (auto-detected) 2024-12-06T10:10:05,246 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-06T10:10:05,266 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36447 2024-12-06T10:10:05,277 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:10:05,281 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:10:05,299 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:36447 connecting to ZooKeeper ensemble=127.0.0.1:49614 2024-12-06T10:10:05,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:364470x0, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:10:05,337 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36447-0x10066ce64640000 connected 2024-12-06T10:10:05,366 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:10:05,369 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:10:05,372 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:10:05,376 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36447 2024-12-06T10:10:05,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36447 2024-12-06T10:10:05,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36447 2024-12-06T10:10:05,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36447 2024-12-06T10:10:05,378 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36447 2024-12-06T10:10:05,384 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9, hbase.cluster.distributed=false 2024-12-06T10:10:05,453 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:10:05,453 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:10:05,454 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:10:05,454 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:10:05,454 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:10:05,454 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:10:05,456 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:10:05,458 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:10:05,459 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45723 2024-12-06T10:10:05,461 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T10:10:05,467 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T10:10:05,468 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:10:05,472 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:10:05,477 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45723 connecting to ZooKeeper ensemble=127.0.0.1:49614 2024-12-06T10:10:05,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:457230x0, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:10:05,481 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45723-0x10066ce64640001 connected 2024-12-06T10:10:05,481 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:10:05,483 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:10:05,484 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:10:05,485 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45723 2024-12-06T10:10:05,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45723 2024-12-06T10:10:05,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45723 2024-12-06T10:10:05,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45723 2024-12-06T10:10:05,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45723 2024-12-06T10:10:05,493 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/552d6a33fa09,36447,1733479804672 2024-12-06T10:10:05,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:10:05,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:10:05,503 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/552d6a33fa09,36447,1733479804672 2024-12-06T10:10:05,510 DEBUG [M:0;552d6a33fa09:36447 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;552d6a33fa09:36447 2024-12-06T10:10:05,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:10:05,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/master 2024-12-06T10:10:05,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:05,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:05,526 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:10:05,527 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:10:05,527 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/552d6a33fa09,36447,1733479804672 from backup master directory 2024-12-06T10:10:05,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:10:05,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/552d6a33fa09,36447,1733479804672 2024-12-06T10:10:05,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:10:05,531 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T10:10:05,531 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=552d6a33fa09,36447,1733479804672 2024-12-06T10:10:05,533 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-06T10:10:05,535 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-06T10:10:05,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:10:05,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:10:05,613 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/hbase.id with ID: c0bdc723-587d-4d15-8b8d-0ca63ff77ca3 2024-12-06T10:10:05,655 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:10:05,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:05,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:05,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:10:05,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:10:05,722 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:10:05,724 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T10:10:05,732 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:10:05,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:10:05,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:10:05,808 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store 2024-12-06T10:10:05,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:10:05,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:10:05,832 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
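[Editor's note] As an illustration only, a hypothetical ColumnFamilyDescriptor equivalent to the 'info' family printed in the master:store table descriptor above, written with the standard HBase 2.x client builder API; the master region constructs this descriptor internally rather than through client code like this.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreInfoFamilySketch {
  // VERSIONS => '3', IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL',
  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => '8192 B (8KB)'
  public static ColumnFamilyDescriptor infoFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();
  }
}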
2024-12-06T10:10:05,833 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:10:05,835 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:10:05,835 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:10:05,835 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:10:05,835 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:10:05,835 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:10:05,835 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:10:05,836 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:10:05,838 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/.initializing 2024-12-06T10:10:05,838 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/WALs/552d6a33fa09,36447,1733479804672 2024-12-06T10:10:05,856 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C36447%2C1733479804672, suffix=, logDir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/WALs/552d6a33fa09,36447,1733479804672, archiveDir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/oldWALs, maxLogs=10 2024-12-06T10:10:05,868 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C36447%2C1733479804672.1733479805865 2024-12-06T10:10:05,869 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(752): Using builder API via reflection for DFS file creation replicate flag. 2024-12-06T10:10:05,869 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(762): Using builder API via reflection for DFS file creation noLocalWrite flag. 
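[Editor's note] A hedged sketch of the WAL settings these lines reflect (FSHLogProvider as the WALProvider, maxLogs=10), expressed with standard HBase configuration keys. Whether TestLogRolling sets exactly these keys, rather than relying on defaults plus its own overrides, is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static Configuration walConfig() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");   // selects the FSHLog-based provider (FSHLogProvider)
    conf.setInt("hbase.regionserver.maxlogs", 10);  // cap on retained WAL files before forced rolls, as in maxLogs=10 above
    return conf;
  }
}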
2024-12-06T10:10:05,896 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/WALs/552d6a33fa09,36447,1733479804672/552d6a33fa09%2C36447%2C1733479804672.1733479805865 2024-12-06T10:10:05,907 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45777:45777),(127.0.0.1/127.0.0.1:35965:35965)] 2024-12-06T10:10:05,908 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:10:05,909 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:10:05,913 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:10:05,915 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:10:05,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:10:05,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T10:10:05,992 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:05,994 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:10:05,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:10:05,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T10:10:05,999 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:06,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:10:06,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:10:06,004 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T10:10:06,004 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:06,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:10:06,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:10:06,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T10:10:06,008 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:06,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:10:06,013 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:10:06,014 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:10:06,022 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-06T10:10:06,026 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:10:06,030 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:10:06,031 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688952, jitterRate=-0.12395253777503967}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T10:10:06,035 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:10:06,036 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T10:10:06,065 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@100c1801, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:10:06,102 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T10:10:06,115 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T10:10:06,116 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T10:10:06,118 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T10:10:06,120 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-06T10:10:06,125 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-12-06T10:10:06,126 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T10:10:06,153 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
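
The FlushLargeStoresPolicy entries above show the fallback path: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the lower bound becomes the memstore flush size divided by the number of column families. A quick check with the values logged for master:store (flushSize=134217728 and four families: info, proc, rs, state); the class name is illustrative:

public class FlushLowerBoundCheck {
    public static void main(String[] args) {
        long flushSizeBytes = 134_217_728L;  // flushSize=134217728 (128 MB) in the log
        int columnFamilies = 4;              // info, proc, rs, state

        // Fallback used when the per-column-family lower bound is not configured.
        long lowerBound = flushSizeBytes / columnFamilies;

        // Prints 33554432 (32 MB), matching flushSizeLowerBound=33554432 above.
        System.out.println(lowerBound);
    }
}
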
2024-12-06T10:10:06,165 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T10:10:06,167 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T10:10:06,169 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T10:10:06,170 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T10:10:06,172 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T10:10:06,174 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T10:10:06,177 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T10:10:06,179 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T10:10:06,180 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T10:10:06,182 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T10:10:06,191 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T10:10:06,193 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T10:10:06,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T10:10:06,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:06,199 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=552d6a33fa09,36447,1733479804672, sessionid=0x10066ce64640000, setting cluster-up flag (Was=false) 2024-12-06T10:10:06,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T10:10:06,201 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:06,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:06,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:06,219 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T10:10:06,221 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,36447,1733479804672 2024-12-06T10:10:06,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:06,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:06,235 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T10:10:06,238 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,36447,1733479804672 2024-12-06T10:10:06,325 DEBUG [RS:0;552d6a33fa09:45723 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;552d6a33fa09:45723 2024-12-06T10:10:06,331 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1008): ClusterId : c0bdc723-587d-4d15-8b8d-0ca63ff77ca3 2024-12-06T10:10:06,335 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T10:10:06,343 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T10:10:06,349 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T10:10:06,350 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T10:10:06,352 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T10:10:06,355 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, 
RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T10:10:06,358 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T10:10:06,358 DEBUG [RS:0;552d6a33fa09:45723 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b2abb9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:10:06,360 DEBUG [RS:0;552d6a33fa09:45723 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f91c6cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:10:06,362 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 552d6a33fa09,36447,1733479804672 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T10:10:06,365 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T10:10:06,365 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T10:10:06,365 DEBUG [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-06T10:10:06,367 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:10:06,367 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:10:06,367 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:10:06,368 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:10:06,368 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/552d6a33fa09:0, corePoolSize=10, maxPoolSize=10 2024-12-06T10:10:06,368 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,368 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(3073): reportForDuty to master=552d6a33fa09,36447,1733479804672 with isa=552d6a33fa09/172.17.0.2:45723, startcode=1733479805452 2024-12-06T10:10:06,368 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:10:06,369 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,372 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733479836372 2024-12-06T10:10:06,374 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T10:10:06,376 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T10:10:06,380 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T10:10:06,382 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T10:10:06,382 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:10:06,383 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T10:10:06,383 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T10:10:06,383 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T10:10:06,388 DEBUG [RS:0;552d6a33fa09:45723 
{}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:10:06,388 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:06,389 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T10:10:06,390 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T10:10:06,391 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T10:10:06,393 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:06,393 INFO [PEWorker-2 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:10:06,416 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T10:10:06,416 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T10:10:06,418 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733479806417,5,FailOnTimeoutGroup] 2024-12-06T10:10:06,419 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733479806418,5,FailOnTimeoutGroup] 2024-12-06T10:10:06,419 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:06,419 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T10:10:06,421 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:06,421 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:06,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:10:06,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:10:06,430 INFO [PEWorker-2 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T10:10:06,432 INFO [PEWorker-2 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9 2024-12-06T10:10:06,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:10:06,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:10:06,454 DEBUG [PEWorker-2 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:10:06,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:10:06,469 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:10:06,470 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:06,471 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:10:06,471 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:10:06,477 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:10:06,477 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:06,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:10:06,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:10:06,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered 
window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:10:06,481 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:06,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:10:06,483 DEBUG [PEWorker-2 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740 2024-12-06T10:10:06,484 DEBUG [PEWorker-2 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740 2024-12-06T10:10:06,488 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:10:06,491 DEBUG [PEWorker-2 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:10:06,495 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:10:06,496 INFO [PEWorker-2 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760525, jitterRate=-0.03294253349304199}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:10:06,498 DEBUG [PEWorker-2 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:10:06,498 DEBUG [PEWorker-2 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:10:06,498 INFO [PEWorker-2 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:10:06,498 DEBUG [PEWorker-2 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:10:06,499 DEBUG [PEWorker-2 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:10:06,499 DEBUG [PEWorker-2 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:10:06,500 INFO [PEWorker-2 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T10:10:06,500 DEBUG [PEWorker-2 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:10:06,503 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54589, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:10:06,504 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:10:06,504 INFO [PEWorker-2 {}] 
procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T10:10:06,509 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36447 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 552d6a33fa09,45723,1733479805452 2024-12-06T10:10:06,511 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36447 {}] master.ServerManager(486): Registering regionserver=552d6a33fa09,45723,1733479805452 2024-12-06T10:10:06,512 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T10:10:06,522 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T10:10:06,524 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T10:10:06,526 DEBUG [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9 2024-12-06T10:10:06,526 DEBUG [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38219 2024-12-06T10:10:06,527 DEBUG [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T10:10:06,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:10:06,535 DEBUG [RS:0;552d6a33fa09:45723 {}] zookeeper.ZKUtil(111): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/552d6a33fa09,45723,1733479805452 2024-12-06T10:10:06,536 WARN [RS:0;552d6a33fa09:45723 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T10:10:06,536 INFO [RS:0;552d6a33fa09:45723 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:10:06,536 DEBUG [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452 2024-12-06T10:10:06,537 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [552d6a33fa09,45723,1733479805452] 2024-12-06T10:10:06,552 DEBUG [RS:0;552d6a33fa09:45723 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T10:10:06,568 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T10:10:06,584 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T10:10:06,588 INFO [RS:0;552d6a33fa09:45723 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T10:10:06,588 INFO [RS:0;552d6a33fa09:45723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:06,589 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T10:10:06,598 INFO [RS:0;552d6a33fa09:45723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
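
The MemStoreFlusher entry above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. A short sketch of how the low-water mark relates to the limit, assuming the default lower-limit fraction of 0.95 (hbase.regionserver.global.memstore.size.lower.limit); the class name is illustrative and not part of this run:

public class MemStoreLowMarkCheck {
    public static void main(String[] args) {
        double globalLimitMb = 880.0;      // globalMemStoreLimit=880 M in the log
        double lowerLimitFraction = 0.95;  // assumed default fraction

        // The low-water mark is the global limit scaled by the lower-limit fraction.
        double lowMarkMb = globalLimitMb * lowerLimitFraction;

        // Prints 836.0, matching globalMemStoreLimitLowMark=836 M above.
        System.out.println(lowMarkMb);
    }
}
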
2024-12-06T10:10:06,599 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,599 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,599 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,599 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,600 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,600 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:10:06,600 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,600 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,600 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,601 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,601 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:10:06,601 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:10:06,601 DEBUG [RS:0;552d6a33fa09:45723 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:10:06,602 INFO [RS:0;552d6a33fa09:45723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:06,602 INFO [RS:0;552d6a33fa09:45723 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:06,602 INFO [RS:0;552d6a33fa09:45723 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:06,603 INFO [RS:0;552d6a33fa09:45723 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:06,603 INFO [RS:0;552d6a33fa09:45723 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,45723,1733479805452-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T10:10:06,622 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T10:10:06,624 INFO [RS:0;552d6a33fa09:45723 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,45723,1733479805452-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:06,646 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.Replication(204): 552d6a33fa09,45723,1733479805452 started 2024-12-06T10:10:06,646 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1767): Serving as 552d6a33fa09,45723,1733479805452, RpcServer on 552d6a33fa09/172.17.0.2:45723, sessionid=0x10066ce64640001 2024-12-06T10:10:06,647 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T10:10:06,647 DEBUG [RS:0;552d6a33fa09:45723 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 552d6a33fa09,45723,1733479805452 2024-12-06T10:10:06,647 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,45723,1733479805452' 2024-12-06T10:10:06,648 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T10:10:06,649 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T10:10:06,649 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T10:10:06,650 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T10:10:06,650 DEBUG [RS:0;552d6a33fa09:45723 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 552d6a33fa09,45723,1733479805452 2024-12-06T10:10:06,650 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,45723,1733479805452' 2024-12-06T10:10:06,650 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T10:10:06,650 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T10:10:06,651 DEBUG [RS:0;552d6a33fa09:45723 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T10:10:06,651 INFO [RS:0;552d6a33fa09:45723 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T10:10:06,651 INFO [RS:0;552d6a33fa09:45723 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T10:10:06,675 WARN [552d6a33fa09:36447 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-06T10:10:06,762 INFO [RS:0;552d6a33fa09:45723 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C45723%2C1733479805452, suffix=, logDir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452, archiveDir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs, maxLogs=32 2024-12-06T10:10:06,766 INFO [RS:0;552d6a33fa09:45723 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45723%2C1733479805452.1733479806766 2024-12-06T10:10:06,777 INFO [RS:0;552d6a33fa09:45723 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479806766 2024-12-06T10:10:06,777 DEBUG [RS:0;552d6a33fa09:45723 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45777:45777),(127.0.0.1/127.0.0.1:35965:35965)] 2024-12-06T10:10:06,926 DEBUG [552d6a33fa09:36447 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T10:10:06,931 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=552d6a33fa09,45723,1733479805452 2024-12-06T10:10:06,936 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,45723,1733479805452, state=OPENING 2024-12-06T10:10:06,942 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T10:10:06,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:06,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:06,945 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:10:06,945 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:10:06,947 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=552d6a33fa09,45723,1733479805452}] 2024-12-06T10:10:07,121 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,45723,1733479805452 2024-12-06T10:10:07,123 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T10:10:07,127 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51232, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T10:10:07,140 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T10:10:07,141 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:10:07,145 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C45723%2C1733479805452.meta, suffix=.meta, logDir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452, archiveDir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs, maxLogs=32 2024-12-06T10:10:07,148 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45723%2C1733479805452.meta.1733479807147.meta 2024-12-06T10:10:07,155 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.meta.1733479807147.meta 2024-12-06T10:10:07,155 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45777:45777),(127.0.0.1/127.0.0.1:35965:35965)] 2024-12-06T10:10:07,156 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:10:07,158 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T10:10:07,234 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T10:10:07,240 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-06T10:10:07,245 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T10:10:07,245 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:10:07,246 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T10:10:07,246 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T10:10:07,250 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:10:07,252 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:10:07,252 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:07,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:10:07,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:10:07,255 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:10:07,255 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:07,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:10:07,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:10:07,258 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:10:07,258 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:07,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:10:07,260 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740 2024-12-06T10:10:07,264 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740 2024-12-06T10:10:07,267 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
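The FlushLargeStoresPolicy entry above falls back to memStoreFlushSize divided by the number of families (16.0 M) because hbase.hregion.percolumnfamilyflush.size.lower.bound is not present in the hbase:meta table descriptor. As an illustrative sketch only (not something this test does), that bound can be carried in a table descriptor via setValue; the table name here is hypothetical and the 16 MB figure mirrors the fallback computed above.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Hypothetical table, used only to show where the key would live.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
        // Key name taken verbatim from the log entry above; 16 MB lower bound.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td);
  }
}
```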
2024-12-06T10:10:07,270 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:10:07,272 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703660, jitterRate=-0.1052509993314743}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:10:07,273 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:10:07,280 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733479807116 2024-12-06T10:10:07,292 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T10:10:07,292 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T10:10:07,293 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,45723,1733479805452 2024-12-06T10:10:07,295 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,45723,1733479805452, state=OPEN 2024-12-06T10:10:07,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:10:07,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:10:07,300 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:10:07,300 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:10:07,304 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T10:10:07,304 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=552d6a33fa09,45723,1733479805452 in 354 msec 2024-12-06T10:10:07,310 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T10:10:07,310 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 794 msec 2024-12-06T10:10:07,315 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.0370 sec 2024-12-06T10:10:07,315 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733479807315, completionTime=-1 2024-12-06T10:10:07,316 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T10:10:07,316 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T10:10:07,352 DEBUG [hconnection-0x2e7f43da-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:10:07,354 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:10:07,367 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T10:10:07,367 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733479867367 2024-12-06T10:10:07,367 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733479927367 2024-12-06T10:10:07,367 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 51 msec 2024-12-06T10:10:07,391 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,36447,1733479804672-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:07,392 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,36447,1733479804672-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:07,392 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,36447,1733479804672-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:07,393 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-552d6a33fa09:36447, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:07,394 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T10:10:07,399 DEBUG [master/552d6a33fa09:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T10:10:07,402 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-06T10:10:07,403 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:10:07,409 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T10:10:07,412 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:10:07,413 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:07,415 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:10:07,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:10:07,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:10:07,431 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e76b4208dc794db96ec7e28efad78c92, NAME => 'hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9 2024-12-06T10:10:07,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:10:07,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:10:07,441 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:10:07,441 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing e76b4208dc794db96ec7e28efad78c92, disabling compactions & flushes 2024-12-06T10:10:07,442 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 
2024-12-06T10:10:07,442 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 2024-12-06T10:10:07,442 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. after waiting 0 ms 2024-12-06T10:10:07,442 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 2024-12-06T10:10:07,442 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 2024-12-06T10:10:07,442 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for e76b4208dc794db96ec7e28efad78c92: 2024-12-06T10:10:07,444 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:10:07,450 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733479807445"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733479807445"}]},"ts":"1733479807445"} 2024-12-06T10:10:07,476 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T10:10:07,478 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:10:07,481 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479807478"}]},"ts":"1733479807478"} 2024-12-06T10:10:07,485 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T10:10:07,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e76b4208dc794db96ec7e28efad78c92, ASSIGN}] 2024-12-06T10:10:07,494 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e76b4208dc794db96ec7e28efad78c92, ASSIGN 2024-12-06T10:10:07,496 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=e76b4208dc794db96ec7e28efad78c92, ASSIGN; state=OFFLINE, location=552d6a33fa09,45723,1733479805452; forceNewPlan=false, retain=false 2024-12-06T10:10:07,647 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e76b4208dc794db96ec7e28efad78c92, regionState=OPENING, regionLocation=552d6a33fa09,45723,1733479805452 2024-12-06T10:10:07,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; 
OpenRegionProcedure e76b4208dc794db96ec7e28efad78c92, server=552d6a33fa09,45723,1733479805452}] 2024-12-06T10:10:07,805 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,45723,1733479805452 2024-12-06T10:10:07,813 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 2024-12-06T10:10:07,813 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => e76b4208dc794db96ec7e28efad78c92, NAME => 'hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:10:07,813 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace e76b4208dc794db96ec7e28efad78c92 2024-12-06T10:10:07,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:10:07,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for e76b4208dc794db96ec7e28efad78c92 2024-12-06T10:10:07,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for e76b4208dc794db96ec7e28efad78c92 2024-12-06T10:10:07,816 INFO [StoreOpener-e76b4208dc794db96ec7e28efad78c92-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e76b4208dc794db96ec7e28efad78c92 2024-12-06T10:10:07,819 INFO [StoreOpener-e76b4208dc794db96ec7e28efad78c92-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e76b4208dc794db96ec7e28efad78c92 columnFamilyName info 2024-12-06T10:10:07,820 DEBUG [StoreOpener-e76b4208dc794db96ec7e28efad78c92-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:07,821 INFO [StoreOpener-e76b4208dc794db96ec7e28efad78c92-1 {}] regionserver.HStore(327): Store=e76b4208dc794db96ec7e28efad78c92/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, 
compression=NONE 2024-12-06T10:10:07,823 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/namespace/e76b4208dc794db96ec7e28efad78c92 2024-12-06T10:10:07,823 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/namespace/e76b4208dc794db96ec7e28efad78c92 2024-12-06T10:10:07,828 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for e76b4208dc794db96ec7e28efad78c92 2024-12-06T10:10:07,831 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/namespace/e76b4208dc794db96ec7e28efad78c92/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:10:07,832 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened e76b4208dc794db96ec7e28efad78c92; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734594, jitterRate=-0.06591580808162689}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:10:07,833 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for e76b4208dc794db96ec7e28efad78c92: 2024-12-06T10:10:07,835 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92., pid=6, masterSystemTime=1733479807805 2024-12-06T10:10:07,839 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 2024-12-06T10:10:07,839 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 
2024-12-06T10:10:07,840 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e76b4208dc794db96ec7e28efad78c92, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,45723,1733479805452 2024-12-06T10:10:07,853 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T10:10:07,853 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure e76b4208dc794db96ec7e28efad78c92, server=552d6a33fa09,45723,1733479805452 in 198 msec 2024-12-06T10:10:07,857 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T10:10:07,857 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=e76b4208dc794db96ec7e28efad78c92, ASSIGN in 361 msec 2024-12-06T10:10:07,858 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:10:07,859 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479807859"}]},"ts":"1733479807859"} 2024-12-06T10:10:07,862 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T10:10:07,866 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:10:07,869 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 462 msec 2024-12-06T10:10:07,913 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T10:10:07,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:07,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:10:07,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:10:07,952 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T10:10:07,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:10:07,978 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 31 msec 2024-12-06T10:10:07,988 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T10:10:08,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:10:08,008 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 20 msec 2024-12-06T10:10:08,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T10:10:08,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T10:10:08,030 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.499sec 2024-12-06T10:10:08,032 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T10:10:08,034 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T10:10:08,035 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T10:10:08,036 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T10:10:08,036 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T10:10:08,037 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,36447,1733479804672-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T10:10:08,038 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,36447,1733479804672-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T10:10:08,045 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T10:10:08,046 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T10:10:08,047 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,36447,1733479804672-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
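The CreateNamespaceProcedure entries above (namespace=default in 31 msec, namespace=hbase in 20 msec) are the master bootstrapping its built-in namespaces during initialization. For reference, additional namespaces are created from a client through the same machinery; a minimal sketch assuming a running cluster, where the namespace name "demo_ns" is purely illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // "demo_ns" is hypothetical; 'default' and 'hbase' already exist after master init.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
    }
  }
}
```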
2024-12-06T10:10:08,122 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c4612d8 to 127.0.0.1:49614 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@87fdff8 2024-12-06T10:10:08,122 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-06T10:10:08,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b0c09d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:10:08,135 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-06T10:10:08,135 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-06T10:10:08,149 DEBUG [hconnection-0x3878e8d1-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:10:08,189 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51250, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:10:08,200 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=552d6a33fa09,36447,1733479804672 2024-12-06T10:10:08,201 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:10:08,210 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T10:10:08,218 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:10:08,223 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:10:08,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36447 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T10:10:08,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36447 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
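The two TableDescriptorChecker warnings above fire because the test runs with a deliberately tiny region max file size (786432 bytes) and memstore flush size (8192 bytes), whether set in the descriptor or in the test's site configuration, so that splits and flushes happen quickly; the test evidently relaxes the table sanity checks, which is why these only log as warnings. A sketch of how a descriptor matching the create request logged below could be built and submitted from a client; the class name is hypothetical and the small sizes are shown on the descriptor purely for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSlowSyncTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)        // VERSIONS => '1' in the create request below
              .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
              .build())
          // Deliberately small values; these are what trip the checker warnings above.
          .setMaxFileSize(786432L)       // MAX_FILESIZE / hbase.hregion.max.filesize
          .setMemStoreFlushSize(8192L)   // MEMSTORE_FLUSHSIZE / hbase.hregion.memstore.flush.size
          .build());
    }
  }
}
```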
2024-12-06T10:10:08,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36447 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:10:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36447 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-06T10:10:08,242 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:10:08,243 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36447 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 9 2024-12-06T10:10:08,243 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:08,245 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:10:08,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36447 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:10:08,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741837_1013 (size=389) 2024-12-06T10:10:08,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741837_1013 (size=389) 2024-12-06T10:10:08,319 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9af4fdd9b2124cf8ec0e48e3ff7ba18e, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9 2024-12-06T10:10:08,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741838_1014 (size=72) 2024-12-06T10:10:08,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741838_1014 (size=72) 2024-12-06T10:10:08,343 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 
{}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:10:08,343 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 9af4fdd9b2124cf8ec0e48e3ff7ba18e, disabling compactions & flushes 2024-12-06T10:10:08,343 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 2024-12-06T10:10:08,344 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 2024-12-06T10:10:08,344 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. after waiting 0 ms 2024-12-06T10:10:08,344 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 2024-12-06T10:10:08,344 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 2024-12-06T10:10:08,344 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9af4fdd9b2124cf8ec0e48e3ff7ba18e: 2024-12-06T10:10:08,346 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:10:08,346 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733479808346"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733479808346"}]},"ts":"1733479808346"} 2024-12-06T10:10:08,350 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-06T10:10:08,352 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:10:08,352 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479808352"}]},"ts":"1733479808352"} 2024-12-06T10:10:08,355 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-06T10:10:08,360 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9af4fdd9b2124cf8ec0e48e3ff7ba18e, ASSIGN}] 2024-12-06T10:10:08,363 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9af4fdd9b2124cf8ec0e48e3ff7ba18e, ASSIGN 2024-12-06T10:10:08,364 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9af4fdd9b2124cf8ec0e48e3ff7ba18e, ASSIGN; state=OFFLINE, location=552d6a33fa09,45723,1733479805452; forceNewPlan=false, retain=false 2024-12-06T10:10:08,515 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9af4fdd9b2124cf8ec0e48e3ff7ba18e, regionState=OPENING, regionLocation=552d6a33fa09,45723,1733479805452 2024-12-06T10:10:08,520 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 9af4fdd9b2124cf8ec0e48e3ff7ba18e, server=552d6a33fa09,45723,1733479805452}] 2024-12-06T10:10:08,674 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,45723,1733479805452 2024-12-06T10:10:08,682 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 
2024-12-06T10:10:08,682 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 9af4fdd9b2124cf8ec0e48e3ff7ba18e, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:10:08,683 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:10:08,683 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:10:08,683 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:10:08,683 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:10:08,686 INFO [StoreOpener-9af4fdd9b2124cf8ec0e48e3ff7ba18e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:10:08,688 INFO [StoreOpener-9af4fdd9b2124cf8ec0e48e3ff7ba18e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9af4fdd9b2124cf8ec0e48e3ff7ba18e columnFamilyName info 2024-12-06T10:10:08,688 DEBUG [StoreOpener-9af4fdd9b2124cf8ec0e48e3ff7ba18e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:10:08,689 INFO [StoreOpener-9af4fdd9b2124cf8ec0e48e3ff7ba18e-1 {}] regionserver.HStore(327): Store=9af4fdd9b2124cf8ec0e48e3ff7ba18e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:10:08,691 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:10:08,691 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:10:08,696 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:10:08,700 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:10:08,700 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 9af4fdd9b2124cf8ec0e48e3ff7ba18e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735191, jitterRate=-0.06515663862228394}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:10:08,702 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 9af4fdd9b2124cf8ec0e48e3ff7ba18e: 2024-12-06T10:10:08,703 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e., pid=11, masterSystemTime=1733479808674 2024-12-06T10:10:08,707 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 2024-12-06T10:10:08,707 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 
2024-12-06T10:10:08,708 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9af4fdd9b2124cf8ec0e48e3ff7ba18e, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,45723,1733479805452 2024-12-06T10:10:08,716 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T10:10:08,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 9af4fdd9b2124cf8ec0e48e3ff7ba18e, server=552d6a33fa09,45723,1733479805452 in 192 msec 2024-12-06T10:10:08,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T10:10:08,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9af4fdd9b2124cf8ec0e48e3ff7ba18e, ASSIGN in 356 msec 2024-12-06T10:10:08,730 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:10:08,731 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479808730"}]},"ts":"1733479808730"} 2024-12-06T10:10:08,734 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-06T10:10:08,739 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:10:08,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 502 msec 2024-12-06T10:10:12,749 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T10:10:12,805 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T10:10:12,807 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-06T10:10:12,808 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-06T10:10:15,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T10:10:15,197 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-06T10:10:15,198 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-06T10:10:15,198 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-06T10:10:15,199 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-06T10:10:15,199 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-06T10:10:15,200 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T10:10:15,200 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-06T10:10:15,201 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-06T10:10:15,201 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-06T10:10:18,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36447 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:10:18,294 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling, procId: 9 completed 2024-12-06T10:10:18,298 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-06T10:10:18,299 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 
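Between the WAL roll below and the flush at 10:10:30, the test writes a handful of roughly 1 KB cells (the flushed HFile's first key is row0001/info:, entries=7, dataSize ~7.36 KB), which pushes the 8192-byte memstore flush size and triggers the flush. A minimal client-side sketch of equivalent writes; the qualifier "q" and the loop bounds are illustrative, only the table name, family, row-key pattern, and cell size are taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteRowsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    byte[] family = Bytes.toBytes("info");
    byte[] value = new byte[1024];  // ~1 KB per cell, like the cells flushed above
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
      for (int i = 1; i <= 7; i++) {
        // Row keys follow the row000N pattern seen in the flushed file's first key.
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(family, Bytes.toBytes("q"), value);  // qualifier "q" is illustrative
        table.put(put);
      }
    }
  }
}
```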
2024-12-06T10:10:18,300 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45723%2C1733479805452.1733479818300 2024-12-06T10:10:18,311 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479806766 with entries=4, filesize=947 B; new WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479818300 2024-12-06T10:10:18,312 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45777:45777),(127.0.0.1/127.0.0.1:35965:35965)] 2024-12-06T10:10:18,312 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479806766 is not closed yet, will try archiving it next time 2024-12-06T10:10:18,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741833_1009 (size=955) 2024-12-06T10:10:18,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741833_1009 (size=955) 2024-12-06T10:10:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45723 {}] regionserver.HRegion(8581): Flush requested on 9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:10:30,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9af4fdd9b2124cf8ec0e48e3ff7ba18e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T10:10:30,418 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/8799dd26591149be8153c56922a66d1d is 1080, key is row0001/info:/1733479818317/Put/seqid=0 2024-12-06T10:10:30,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741840_1016 (size=12509) 2024-12-06T10:10:30,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741840_1016 (size=12509) 2024-12-06T10:10:30,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/8799dd26591149be8153c56922a66d1d 2024-12-06T10:10:30,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/8799dd26591149be8153c56922a66d1d as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/8799dd26591149be8153c56922a66d1d 2024-12-06T10:10:30,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/8799dd26591149be8153c56922a66d1d, entries=7, sequenceid=11, filesize=12.2 K 2024-12-06T10:10:30,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9af4fdd9b2124cf8ec0e48e3ff7ba18e in 159ms, sequenceid=11, compaction requested=false 2024-12-06T10:10:30,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9af4fdd9b2124cf8ec0e48e3ff7ba18e: 2024-12-06T10:10:33,703 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T10:10:36,362 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:10:36,365 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47284, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:10:38,352 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45723%2C1733479805452.1733479838352 2024-12-06T10:10:38,560 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:38,562 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479818300 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479838352 2024-12-06T10:10:38,562 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45777:45777),(127.0.0.1/127.0.0.1:35965:35965)] 2024-12-06T10:10:38,562 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479818300 is not closed yet, will try archiving it next time 2024-12-06T10:10:38,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741839_1015 (size=12399) 2024-12-06T10:10:38,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741839_1015 (size=12399) 2024-12-06T10:10:38,765 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:40,968 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:43,171 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:45,375 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:45,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45723 {}] regionserver.HRegion(8581): Flush requested on 9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:10:45,376 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9af4fdd9b2124cf8ec0e48e3ff7ba18e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T10:10:45,577 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:45,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/e1f96c2c14c04285b6f7a9df4efc76f5 is 1080, key is row0008/info:/1733479832343/Put/seqid=0 2024-12-06T10:10:45,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741842_1018 (size=12509) 2024-12-06T10:10:45,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741842_1018 (size=12509) 2024-12-06T10:10:45,592 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/e1f96c2c14c04285b6f7a9df4efc76f5 2024-12-06T10:10:45,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/e1f96c2c14c04285b6f7a9df4efc76f5 as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/e1f96c2c14c04285b6f7a9df4efc76f5 2024-12-06T10:10:45,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/e1f96c2c14c04285b6f7a9df4efc76f5, entries=7, sequenceid=21, filesize=12.2 K 2024-12-06T10:10:45,815 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 
2024-12-06T10:10:45,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9af4fdd9b2124cf8ec0e48e3ff7ba18e in 440ms, sequenceid=21, compaction requested=false 2024-12-06T10:10:45,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9af4fdd9b2124cf8ec0e48e3ff7ba18e: 2024-12-06T10:10:45,816 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=24.4 K, sizeToCheck=16.0 K 2024-12-06T10:10:45,816 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T10:10:45,817 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/8799dd26591149be8153c56922a66d1d because midkey is the same as first or last row 2024-12-06T10:10:47,578 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:48,166 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T10:10:48,166 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-06T10:10:49,782 WARN [sync.1 {}] wal.AbstractFSWAL(1346): Requesting log roll because we exceeded slow sync threshold; count=7, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:49,783 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C45723%2C1733479805452:(num 1733479838352) roll requested 2024-12-06T10:10:49,783 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:49,784 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45723%2C1733479805452.1733479849783 2024-12-06T10:10:49,993 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:50,194 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:50,195 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479838352 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479849783 2024-12-06T10:10:50,195 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45777:45777),(127.0.0.1/127.0.0.1:35965:35965)] 2024-12-06T10:10:50,195 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479838352 is not closed yet, will try archiving it next time 2024-12-06T10:10:50,196 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479818300 to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs/552d6a33fa09%2C45723%2C1733479805452.1733479818300 2024-12-06T10:10:50,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741841_1017 (size=7739) 2024-12-06T10:10:50,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741841_1017 (size=7739) 2024-12-06T10:10:51,985 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:53,683 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 9af4fdd9b2124cf8ec0e48e3ff7ba18e, had cached 0 bytes from a total of 25018 2024-12-06T10:10:54,189 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:56,392 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:10:58,595 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:00,597 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T10:11:00,598 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45723%2C1733479805452.1733479860597 2024-12-06T10:11:03,704 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
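[Editor's aside] The cycle recorded above — seven ~1 KB cells written into the `info` family of `TestLogRolling-testSlowSyncLogRolling`, a memstore flush into a new HFile, and a WAL roll once syncs turn slow — can be driven from the client side. A minimal sketch, assuming a default client `Configuration` that points at this minicluster; the row/family/table names are taken from the log, everything else is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SlowSyncLogRollingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml for this minicluster is on the classpath
    TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      byte[] family = Bytes.toBytes("info");
      byte[] qualifier = Bytes.toBytes(""); // empty qualifier, matching the "info:/" keys in the log
      for (int i = 1; i <= 7; i++) {        // row0001..row0007, as in the first flush above
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(family, qualifier, new byte[1024]); // ~1 KB values, close to the ~1080-byte cells logged
        table.put(put);                     // each put is appended to the region server's WAL before it is acked
      }
      admin.flush(tn);                      // request a memstore flush, producing an HFile like 8799dd26...
    }
  }
}
```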
2024-12-06T10:11:05,607 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:05,607 WARN [Time-limited test {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:05,607 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C45723%2C1733479805452:(num 1733479860597) roll requested 2024-12-06T10:11:08,211 DEBUG [master/552d6a33fa09:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region e76b4208dc794db96ec7e28efad78c92 changed from -1.0 to 0.0, refreshing cache 2024-12-06T10:11:10,608 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:10,608 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:10,609 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479849783 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479860597 2024-12-06T10:11:10,609 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35965:35965),(127.0.0.1/127.0.0.1:45777:45777)] 2024-12-06T10:11:10,609 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479849783 is not closed yet, will try archiving it next time 2024-12-06T10:11:10,610 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45723%2C1733479805452.1733479870609 2024-12-06T10:11:10,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741843_1019 (size=4753) 2024-12-06T10:11:10,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741843_1019 (size=4753) 2024-12-06T10:11:15,612 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK], DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK]] 2024-12-06T10:11:15,612 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, 
threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK], DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK]] 2024-12-06T10:11:15,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45723 {}] regionserver.HRegion(8581): Flush requested on 9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:11:15,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9af4fdd9b2124cf8ec0e48e3ff7ba18e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T10:11:15,621 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK], DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK]] 2024-12-06T10:11:15,622 WARN [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK], DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK]] 2024-12-06T10:11:17,614 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T10:11:20,614 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK], DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK]] 2024-12-06T10:11:20,614 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK], DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK]] 2024-12-06T10:11:20,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/14740707ef6d4c54b966f30c8171c3e6 is 1080, key is row0015/info:/1733479847377/Put/seqid=0 2024-12-06T10:11:20,622 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK], DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK]] 2024-12-06T10:11:20,622 WARN [sync.0 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK], DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK]] 2024-12-06T10:11:20,623 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479860597 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479870609 2024-12-06T10:11:20,623 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] 
wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45777:45777),(127.0.0.1/127.0.0.1:35965:35965)] 2024-12-06T10:11:20,623 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479860597 is not closed yet, will try archiving it next time 2024-12-06T10:11:20,623 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C45723%2C1733479805452:(num 1733479870609) roll requested 2024-12-06T10:11:20,624 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45723%2C1733479805452.1733479880623 2024-12-06T10:11:20,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741844_1020 (size=1569) 2024-12-06T10:11:20,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741844_1020 (size=1569) 2024-12-06T10:11:20,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741846_1022 (size=12509) 2024-12-06T10:11:20,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741846_1022 (size=12509) 2024-12-06T10:11:20,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/14740707ef6d4c54b966f30c8171c3e6 2024-12-06T10:11:20,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/14740707ef6d4c54b966f30c8171c3e6 as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/14740707ef6d4c54b966f30c8171c3e6 2024-12-06T10:11:20,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/14740707ef6d4c54b966f30c8171c3e6, entries=7, sequenceid=31, filesize=12.2 K 2024-12-06T10:11:25,632 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:25,632 WARN [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:25,657 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 
ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:25,657 WARN [sync.1 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:25,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9af4fdd9b2124cf8ec0e48e3ff7ba18e in 10045ms, sequenceid=31, compaction requested=true 2024-12-06T10:11:25,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9af4fdd9b2124cf8ec0e48e3ff7ba18e: 2024-12-06T10:11:25,658 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=36.6 K, sizeToCheck=16.0 K 2024-12-06T10:11:25,658 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T10:11:25,658 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/8799dd26591149be8153c56922a66d1d because midkey is the same as first or last row 2024-12-06T10:11:25,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9af4fdd9b2124cf8ec0e48e3ff7ba18e:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:11:25,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:11:25,660 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:11:25,663 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:11:25,664 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.HStore(1540): 9af4fdd9b2124cf8ec0e48e3ff7ba18e/info is initiating minor compaction (all files) 2024-12-06T10:11:25,664 INFO [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9af4fdd9b2124cf8ec0e48e3ff7ba18e/info in TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 
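[Editor's aside] The minor compaction being set up above was selected automatically once three eligible store files had accumulated (three files is the usual default compaction trigger). For reference, a compaction of the same table can also be requested explicitly through the client `Admin` API; a hedged sketch, with the table name taken from the log and the connection details assumed:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.compact(tn);      // queue a minor compaction, like the one the flusher triggered above
      admin.majorCompact(tn); // or force a major compaction that rewrites all store files for the table
    }
  }
}
```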
2024-12-06T10:11:25,664 INFO [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/8799dd26591149be8153c56922a66d1d, hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/e1f96c2c14c04285b6f7a9df4efc76f5, hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/14740707ef6d4c54b966f30c8171c3e6] into tmpdir=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp, totalSize=36.6 K 2024-12-06T10:11:25,666 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8799dd26591149be8153c56922a66d1d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733479818317 2024-12-06T10:11:25,666 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1f96c2c14c04285b6f7a9df4efc76f5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733479832343 2024-12-06T10:11:25,667 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14740707ef6d4c54b966f30c8171c3e6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733479847377 2024-12-06T10:11:25,692 INFO [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9af4fdd9b2124cf8ec0e48e3ff7ba18e#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:11:25,693 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/ed490d6d06d1463e87d4c8264a063be0 is 1080, key is row0001/info:/1733479818317/Put/seqid=0 2024-12-06T10:11:25,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741848_1024 (size=27710) 2024-12-06T10:11:25,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741848_1024 (size=27710) 2024-12-06T10:11:25,711 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/ed490d6d06d1463e87d4c8264a063be0 as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/ed490d6d06d1463e87d4c8264a063be0 2024-12-06T10:11:30,632 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:30,632 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:30,634 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479870609 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479880623 2024-12-06T10:11:30,634 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45777:45777),(127.0.0.1/127.0.0.1:35965:35965)] 2024-12-06T10:11:30,634 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479870609 is not closed yet, will try archiving it next time 2024-12-06T10:11:30,634 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479838352 to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs/552d6a33fa09%2C45723%2C1733479805452.1733479838352 2024-12-06T10:11:30,634 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C45723%2C1733479805452:(num 1733479880623) roll requested 
2024-12-06T10:11:30,634 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45723%2C1733479805452.1733479890634 2024-12-06T10:11:30,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741845_1021 (size=438) 2024-12-06T10:11:30,636 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479849783 to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs/552d6a33fa09%2C45723%2C1733479805452.1733479849783 2024-12-06T10:11:30,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741845_1021 (size=438) 2024-12-06T10:11:30,638 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479860597 to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs/552d6a33fa09%2C45723%2C1733479805452.1733479860597 2024-12-06T10:11:30,639 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479870609 to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs/552d6a33fa09%2C45723%2C1733479805452.1733479870609 2024-12-06T10:11:33,704 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T10:11:35,635 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:35,635 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:35,636 INFO [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9af4fdd9b2124cf8ec0e48e3ff7ba18e/info of 9af4fdd9b2124cf8ec0e48e3ff7ba18e into ed490d6d06d1463e87d4c8264a063be0(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 9sec to execute. 
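[Editor's aside] The `WAL-Archive-0` entries above show fully replicated, no-longer-needed WAL files being moved from the server's `WALs/` directory into the shared `oldWALs/` directory. A small sketch for inspecting that directory over HDFS; the NameNode URI and the `oldWALs` path are the ones printed in the log, the rest is illustrative:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListOldWalsSketch {
  public static void main(String[] args) throws Exception {
    // NameNode address taken from the log (hdfs://localhost:38219)
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38219"), new Configuration());
    Path oldWals = new Path("/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs");
    for (FileStatus status : fs.listStatus(oldWals)) {
      // prints archived WAL names such as 552d6a33fa09%2C45723%2C1733479805452.1733479838352
      System.out.printf("%s\t%d bytes%n", status.getPath().getName(), status.getLen());
    }
  }
}
```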
2024-12-06T10:11:35,636 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9af4fdd9b2124cf8ec0e48e3ff7ba18e: 2024-12-06T10:11:35,637 INFO [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e., storeName=9af4fdd9b2124cf8ec0e48e3ff7ba18e/info, priority=13, startTime=1733479885659; duration=9sec 2024-12-06T10:11:35,637 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=27.1 K, sizeToCheck=16.0 K 2024-12-06T10:11:35,637 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T10:11:35,637 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/ed490d6d06d1463e87d4c8264a063be0 because midkey is the same as first or last row 2024-12-06T10:11:35,637 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:11:35,637 DEBUG [RS:0;552d6a33fa09:45723-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9af4fdd9b2124cf8ec0e48e3ff7ba18e:info 2024-12-06T10:11:35,646 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:35,646 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41843,DS-b2d41ff9-5209-46c2-847c-3ebe775ecfff,DISK], DatanodeInfoWithStorage[127.0.0.1:42563,DS-aa25ae26-9fd4-4556-a20b-6fb01b202dc9,DISK]] 2024-12-06T10:11:35,647 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479880623 with entries=1, filesize=531 B; new WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479890634 2024-12-06T10:11:35,647 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35965:35965),(127.0.0.1/127.0.0.1:45777:45777)] 2024-12-06T10:11:35,647 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479880623 is not closed yet, will try archiving it next time 2024-12-06T10:11:35,647 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45723%2C1733479805452.1733479895647 2024-12-06T10:11:35,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741847_1023 (size=539) 
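[Editor's aside] The `ConstantSizeRegionSplitPolicy` / `StoreUtils` entries above report two checks: the store size exceeds the configured check size (27.1 K > 16.0 K), so a split looks desirable, but the split is refused because the store's midkey equals its first or last row. A deliberately simplified illustration of that pair of checks (not the actual HBase implementation):

```java
import java.util.Arrays;

public class SplitCheckSketch {
  // "Should split because info size=27.1 K, sizeToCheck=16.0 K"
  static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes) {
    return storeSizeBytes > sizeToCheckBytes;
  }

  // "cannot split ... because midkey is the same as first or last row"
  static boolean canSplitAt(byte[] midkey, byte[] firstKey, byte[] lastKey) {
    return !Arrays.equals(midkey, firstKey) && !Arrays.equals(midkey, lastKey);
  }

  public static void main(String[] args) {
    System.out.println(shouldSplit(27710, 16384));  // true: size check asks for a split
    System.out.println(canSplitAt("row0001".getBytes(), "row0001".getBytes(), "row0029".getBytes())); // false: split refused
  }
}
```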
2024-12-06T10:11:35,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741847_1023 (size=539) 2024-12-06T10:11:35,650 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479880623 to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs/552d6a33fa09%2C45723%2C1733479805452.1733479880623 2024-12-06T10:11:35,659 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479890634 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479895647 2024-12-06T10:11:35,660 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35965:35965),(127.0.0.1/127.0.0.1:45777:45777)] 2024-12-06T10:11:35,660 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479890634 is not closed yet, will try archiving it next time 2024-12-06T10:11:35,660 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C45723%2C1733479805452:(num 1733479895647) roll requested 2024-12-06T10:11:35,660 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45723%2C1733479805452.1733479895660 2024-12-06T10:11:35,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741849_1025 (size=1258) 2024-12-06T10:11:35,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741849_1025 (size=1258) 2024-12-06T10:11:35,668 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479895647 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479895660 2024-12-06T10:11:35,668 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35965:35965),(127.0.0.1/127.0.0.1:45777:45777)] 2024-12-06T10:11:35,668 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479895647 is not closed yet, will try archiving it next time 2024-12-06T10:11:35,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741850_1026 (size=93) 2024-12-06T10:11:35,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added 
to blk_1073741850_1026 (size=93) 2024-12-06T10:11:35,671 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452/552d6a33fa09%2C45723%2C1733479805452.1733479895647 to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs/552d6a33fa09%2C45723%2C1733479805452.1733479895647 2024-12-06T10:11:38,683 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 9af4fdd9b2124cf8ec0e48e3ff7ba18e, had cached 0 bytes from a total of 27710 2024-12-06T10:11:47,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45723 {}] regionserver.HRegion(8581): Flush requested on 9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:11:47,668 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9af4fdd9b2124cf8ec0e48e3ff7ba18e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T10:11:47,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/d29268975fed4daabefcf947d7674006 is 1080, key is row0022/info:/1733479895648/Put/seqid=0 2024-12-06T10:11:47,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741852_1028 (size=12509) 2024-12-06T10:11:47,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741852_1028 (size=12509) 2024-12-06T10:11:47,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/d29268975fed4daabefcf947d7674006 2024-12-06T10:11:47,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/d29268975fed4daabefcf947d7674006 as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/d29268975fed4daabefcf947d7674006 2024-12-06T10:11:47,732 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/d29268975fed4daabefcf947d7674006, entries=7, sequenceid=42, filesize=12.2 K 2024-12-06T10:11:47,733 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9af4fdd9b2124cf8ec0e48e3ff7ba18e in 65ms, sequenceid=42, compaction requested=false 2024-12-06T10:11:47,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9af4fdd9b2124cf8ec0e48e3ff7ba18e: 2024-12-06T10:11:47,734 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=39.3 K, sizeToCheck=16.0 K 2024-12-06T10:11:47,734 DEBUG 
[MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T10:11:47,734 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/ed490d6d06d1463e87d4c8264a063be0 because midkey is the same as first or last row 2024-12-06T10:11:55,681 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T10:11:55,682 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T10:11:55,682 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c4612d8 to 127.0.0.1:49614 2024-12-06T10:11:55,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:11:55,683 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T10:11:55,683 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=171534581, stopped=false 2024-12-06T10:11:55,683 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=552d6a33fa09,36447,1733479804672 2024-12-06T10:11:55,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:11:55,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:11:55,685 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T10:11:55,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:55,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:55,686 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:11:55,686 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,45723,1733479805452' ***** 2024-12-06T10:11:55,686 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T10:11:55,686 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T10:11:55,686 INFO [RS:0;552d6a33fa09:45723 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T10:11:55,686 INFO [RS:0;552d6a33fa09:45723 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
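[Editor's aside] From 10:11:55 onward the log records the test tearing down its single-node minicluster: the master connection is closed, cluster shutdown is requested, and the lone region server receives STOPPING. The entries name `hbase.HBaseTestingUtility(1340): Shutting down minicluster`; a sketch of the standard test lifecycle around that utility (the exact test body here is an assumption):

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();        // starts mini HDFS, ZooKeeper, a master, and one region server
    try {
      // ... test body: create the table, write rows, provoke slow syncs, assert on WAL rolling ...
    } finally {
      util.shutdownMiniCluster();   // emits the "Shutting down minicluster" sequence seen above
    }
  }
}
```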
2024-12-06T10:11:55,687 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(3579): Received CLOSE for e76b4208dc794db96ec7e28efad78c92 2024-12-06T10:11:55,687 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:11:55,687 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T10:11:55,688 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:11:55,692 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(3579): Received CLOSE for 9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:11:55,692 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,45723,1733479805452 2024-12-06T10:11:55,692 DEBUG [RS:0;552d6a33fa09:45723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:11:55,692 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T10:11:55,692 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T10:11:55,692 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T10:11:55,693 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing e76b4208dc794db96ec7e28efad78c92, disabling compactions & flushes 2024-12-06T10:11:55,693 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T10:11:55,693 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 2024-12-06T10:11:55,693 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 2024-12-06T10:11:55,693 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. after waiting 0 ms 2024-12-06T10:11:55,693 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 
2024-12-06T10:11:55,693 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing e76b4208dc794db96ec7e28efad78c92 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T10:11:55,693 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-06T10:11:55,693 DEBUG [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, e76b4208dc794db96ec7e28efad78c92=hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92., 9af4fdd9b2124cf8ec0e48e3ff7ba18e=TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e.} 2024-12-06T10:11:55,693 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:11:55,694 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:11:55,694 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:11:55,694 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:11:55,694 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:11:55,694 DEBUG [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 9af4fdd9b2124cf8ec0e48e3ff7ba18e, e76b4208dc794db96ec7e28efad78c92 2024-12-06T10:11:55,694 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.81 KB heapSize=5.32 KB 2024-12-06T10:11:55,715 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/namespace/e76b4208dc794db96ec7e28efad78c92/.tmp/info/a8ab835172864e69ab0a7f19935097fe is 45, key is default/info:d/1733479807964/Put/seqid=0 2024-12-06T10:11:55,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741853_1029 (size=5037) 2024-12-06T10:11:55,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741853_1029 (size=5037) 2024-12-06T10:11:55,727 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/namespace/e76b4208dc794db96ec7e28efad78c92/.tmp/info/a8ab835172864e69ab0a7f19935097fe 2024-12-06T10:11:55,728 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/.tmp/info/69b5bc3946724bb6bf0985d7dabe2c97 is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e./info:regioninfo/1733479808708/Put/seqid=0 2024-12-06T10:11:55,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741854_1030 (size=8172) 2024-12-06T10:11:55,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741854_1030 (size=8172) 2024-12-06T10:11:55,738 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.59 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/.tmp/info/69b5bc3946724bb6bf0985d7dabe2c97 2024-12-06T10:11:55,741 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/namespace/e76b4208dc794db96ec7e28efad78c92/.tmp/info/a8ab835172864e69ab0a7f19935097fe as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/namespace/e76b4208dc794db96ec7e28efad78c92/info/a8ab835172864e69ab0a7f19935097fe 2024-12-06T10:11:55,752 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/namespace/e76b4208dc794db96ec7e28efad78c92/info/a8ab835172864e69ab0a7f19935097fe, entries=2, sequenceid=6, filesize=4.9 K 2024-12-06T10:11:55,753 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for e76b4208dc794db96ec7e28efad78c92 in 60ms, sequenceid=6, compaction requested=false 2024-12-06T10:11:55,777 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/namespace/e76b4208dc794db96ec7e28efad78c92/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T10:11:55,777 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/.tmp/table/9c692f9fa24f4aea88f100daaa365dde is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733479808730/Put/seqid=0 2024-12-06T10:11:55,780 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 2024-12-06T10:11:55,780 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for e76b4208dc794db96ec7e28efad78c92: 2024-12-06T10:11:55,781 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733479807402.e76b4208dc794db96ec7e28efad78c92. 
2024-12-06T10:11:55,781 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 9af4fdd9b2124cf8ec0e48e3ff7ba18e, disabling compactions & flushes 2024-12-06T10:11:55,781 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 2024-12-06T10:11:55,781 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 2024-12-06T10:11:55,781 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. after waiting 0 ms 2024-12-06T10:11:55,781 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 2024-12-06T10:11:55,782 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 9af4fdd9b2124cf8ec0e48e3ff7ba18e 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-06T10:11:55,789 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/35a908c50144406baaf8eb041543a5ce is 1080, key is row0029/info:/1733479909670/Put/seqid=0 2024-12-06T10:11:55,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741855_1031 (size=5452) 2024-12-06T10:11:55,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741855_1031 (size=5452) 2024-12-06T10:11:55,803 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=232 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/.tmp/table/9c692f9fa24f4aea88f100daaa365dde 2024-12-06T10:11:55,814 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/.tmp/info/69b5bc3946724bb6bf0985d7dabe2c97 as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/info/69b5bc3946724bb6bf0985d7dabe2c97 2024-12-06T10:11:55,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741856_1032 (size=8193) 2024-12-06T10:11:55,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741856_1032 (size=8193) 2024-12-06T10:11:55,825 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/info/69b5bc3946724bb6bf0985d7dabe2c97, entries=20, sequenceid=14, filesize=8.0 K 2024-12-06T10:11:55,832 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/35a908c50144406baaf8eb041543a5ce 2024-12-06T10:11:55,833 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/.tmp/table/9c692f9fa24f4aea88f100daaa365dde as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/table/9c692f9fa24f4aea88f100daaa365dde 2024-12-06T10:11:55,843 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/.tmp/info/35a908c50144406baaf8eb041543a5ce as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/35a908c50144406baaf8eb041543a5ce 2024-12-06T10:11:55,852 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/table/9c692f9fa24f4aea88f100daaa365dde, entries=4, sequenceid=14, filesize=5.3 K 2024-12-06T10:11:55,860 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.81 KB/2882, heapSize ~5.04 KB/5160, currentSize=0 B/0 for 1588230740 in 161ms, sequenceid=14, compaction requested=false 2024-12-06T10:11:55,861 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/35a908c50144406baaf8eb041543a5ce, entries=3, sequenceid=48, filesize=8.0 K 2024-12-06T10:11:55,862 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9af4fdd9b2124cf8ec0e48e3ff7ba18e in 80ms, sequenceid=48, compaction requested=true 2024-12-06T10:11:55,868 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/8799dd26591149be8153c56922a66d1d, 
hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/e1f96c2c14c04285b6f7a9df4efc76f5, hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/14740707ef6d4c54b966f30c8171c3e6] to archive 2024-12-06T10:11:55,873 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:11:55,879 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/8799dd26591149be8153c56922a66d1d to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/8799dd26591149be8153c56922a66d1d 2024-12-06T10:11:55,880 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 2024-12-06T10:11:55,881 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T10:11:55,881 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T10:11:55,881 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:11:55,882 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T10:11:55,882 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/e1f96c2c14c04285b6f7a9df4efc76f5 to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/e1f96c2c14c04285b6f7a9df4efc76f5 2024-12-06T10:11:55,884 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/14740707ef6d4c54b966f30c8171c3e6 to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/info/14740707ef6d4c54b966f30c8171c3e6 2024-12-06T10:11:55,894 DEBUG [RS:0;552d6a33fa09:45723 {}] 
regionserver.HRegionServer(1629): Waiting on 9af4fdd9b2124cf8ec0e48e3ff7ba18e 2024-12-06T10:11:55,907 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/data/default/TestLogRolling-testSlowSyncLogRolling/9af4fdd9b2124cf8ec0e48e3ff7ba18e/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-06T10:11:55,908 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 2024-12-06T10:11:55,908 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 9af4fdd9b2124cf8ec0e48e3ff7ba18e: 2024-12-06T10:11:55,908 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733479808232.9af4fdd9b2124cf8ec0e48e3ff7ba18e. 2024-12-06T10:11:56,094 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,45723,1733479805452; all regions closed. 2024-12-06T10:11:56,096 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452 2024-12-06T10:11:56,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741834_1010 (size=4330) 2024-12-06T10:11:56,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741834_1010 (size=4330) 2024-12-06T10:11:56,102 DEBUG [RS:0;552d6a33fa09:45723 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs 2024-12-06T10:11:56,102 INFO [RS:0;552d6a33fa09:45723 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 552d6a33fa09%2C45723%2C1733479805452.meta:.meta(num 1733479807147) 2024-12-06T10:11:56,103 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/WALs/552d6a33fa09,45723,1733479805452 2024-12-06T10:11:56,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741851_1027 (size=13066) 2024-12-06T10:11:56,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741851_1027 (size=13066) 2024-12-06T10:11:56,111 DEBUG [RS:0;552d6a33fa09:45723 {}] wal.AbstractFSWAL(1071): Moved 3 WAL file(s) to /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/oldWALs 2024-12-06T10:11:56,112 INFO [RS:0;552d6a33fa09:45723 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 552d6a33fa09%2C45723%2C1733479805452:(num 1733479895660) 2024-12-06T10:11:56,112 DEBUG [RS:0;552d6a33fa09:45723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:11:56,112 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:11:56,112 INFO [RS:0;552d6a33fa09:45723 {}] hbase.ChoreService(370): Chore service for: regionserver/552d6a33fa09:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 
2024-12-06T10:11:56,112 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-06T10:11:56,113 INFO [RS:0;552d6a33fa09:45723 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45723 2024-12-06T10:11:56,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/552d6a33fa09,45723,1733479805452 2024-12-06T10:11:56,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:11:56,119 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [552d6a33fa09,45723,1733479805452] 2024-12-06T10:11:56,119 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 552d6a33fa09,45723,1733479805452; numProcessing=1 2024-12-06T10:11:56,121 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/552d6a33fa09,45723,1733479805452 already deleted, retry=false 2024-12-06T10:11:56,121 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 552d6a33fa09,45723,1733479805452 expired; onlineServers=0 2024-12-06T10:11:56,121 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,36447,1733479804672' ***** 2024-12-06T10:11:56,121 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T10:11:56,121 DEBUG [M:0;552d6a33fa09:36447 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dc960f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:11:56,121 INFO [M:0;552d6a33fa09:36447 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,36447,1733479804672 2024-12-06T10:11:56,121 INFO [M:0;552d6a33fa09:36447 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,36447,1733479804672; all regions closed. 2024-12-06T10:11:56,121 DEBUG [M:0;552d6a33fa09:36447 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:11:56,121 DEBUG [M:0;552d6a33fa09:36447 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T10:11:56,122 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T10:11:56,122 DEBUG [M:0;552d6a33fa09:36447 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T10:11:56,122 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733479806418 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733479806418,5,FailOnTimeoutGroup] 2024-12-06T10:11:56,122 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733479806417 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733479806417,5,FailOnTimeoutGroup] 2024-12-06T10:11:56,122 INFO [M:0;552d6a33fa09:36447 {}] hbase.ChoreService(370): Chore service for: master/552d6a33fa09:0 had [] on shutdown 2024-12-06T10:11:56,122 DEBUG [M:0;552d6a33fa09:36447 {}] master.HMaster(1733): Stopping service threads 2024-12-06T10:11:56,122 INFO [M:0;552d6a33fa09:36447 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T10:11:56,122 INFO [M:0;552d6a33fa09:36447 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T10:11:56,123 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T10:11:56,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T10:11:56,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:56,124 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:11:56,125 DEBUG [M:0;552d6a33fa09:36447 {}] zookeeper.ZKUtil(347): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T10:11:56,125 WARN [M:0;552d6a33fa09:36447 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T10:11:56,125 INFO [M:0;552d6a33fa09:36447 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T10:11:56,125 INFO [M:0;552d6a33fa09:36447 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T10:11:56,126 DEBUG [M:0;552d6a33fa09:36447 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:11:56,126 INFO [M:0;552d6a33fa09:36447 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:11:56,126 DEBUG [M:0;552d6a33fa09:36447 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:11:56,126 DEBUG [M:0;552d6a33fa09:36447 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-06T10:11:56,126 DEBUG [M:0;552d6a33fa09:36447 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:11:56,126 INFO [M:0;552d6a33fa09:36447 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.21 KB heapSize=50.14 KB 2024-12-06T10:11:56,155 DEBUG [M:0;552d6a33fa09:36447 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5019ddbcc91d4336a51780328a623d67 is 82, key is hbase:meta,,1/info:regioninfo/1733479807292/Put/seqid=0 2024-12-06T10:11:56,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741857_1033 (size=5672) 2024-12-06T10:11:56,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741857_1033 (size=5672) 2024-12-06T10:11:56,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:11:56,219 INFO [RS:0;552d6a33fa09:45723 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,45723,1733479805452; zookeeper connection closed. 2024-12-06T10:11:56,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45723-0x10066ce64640001, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:11:56,220 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6792720b {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6792720b 2024-12-06T10:11:56,220 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T10:11:56,564 INFO [M:0;552d6a33fa09:36447 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5019ddbcc91d4336a51780328a623d67 2024-12-06T10:11:56,598 DEBUG [M:0;552d6a33fa09:36447 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3ce1fab348cb4f8ba49ddf980161a0c1 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733479808741/Put/seqid=0 2024-12-06T10:11:56,608 INFO [regionserver/552d6a33fa09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:11:56,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741858_1034 (size=6426) 2024-12-06T10:11:56,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741858_1034 (size=6426) 2024-12-06T10:11:56,610 INFO [M:0;552d6a33fa09:36447 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.61 KB at sequenceid=104 (bloomFilter=true), 
to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3ce1fab348cb4f8ba49ddf980161a0c1 2024-12-06T10:11:56,618 INFO [M:0;552d6a33fa09:36447 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3ce1fab348cb4f8ba49ddf980161a0c1 2024-12-06T10:11:56,635 DEBUG [M:0;552d6a33fa09:36447 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6a074fef8fd74c15a6eccb04078cebf1 is 69, key is 552d6a33fa09,45723,1733479805452/rs:state/1733479806513/Put/seqid=0 2024-12-06T10:11:56,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741859_1035 (size=5156) 2024-12-06T10:11:56,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741859_1035 (size=5156) 2024-12-06T10:11:56,655 INFO [M:0;552d6a33fa09:36447 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6a074fef8fd74c15a6eccb04078cebf1 2024-12-06T10:11:56,676 DEBUG [M:0;552d6a33fa09:36447 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/54c5a76e97e74587b632c3a0958ce16e is 52, key is load_balancer_on/state:d/1733479808207/Put/seqid=0 2024-12-06T10:11:56,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741860_1036 (size=5056) 2024-12-06T10:11:56,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741860_1036 (size=5056) 2024-12-06T10:11:56,683 INFO [M:0;552d6a33fa09:36447 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/54c5a76e97e74587b632c3a0958ce16e 2024-12-06T10:11:56,692 DEBUG [M:0;552d6a33fa09:36447 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5019ddbcc91d4336a51780328a623d67 as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5019ddbcc91d4336a51780328a623d67 2024-12-06T10:11:56,698 INFO [M:0;552d6a33fa09:36447 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5019ddbcc91d4336a51780328a623d67, entries=8, sequenceid=104, filesize=5.5 K 2024-12-06T10:11:56,700 DEBUG [M:0;552d6a33fa09:36447 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3ce1fab348cb4f8ba49ddf980161a0c1 as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3ce1fab348cb4f8ba49ddf980161a0c1 2024-12-06T10:11:56,706 INFO [M:0;552d6a33fa09:36447 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3ce1fab348cb4f8ba49ddf980161a0c1 2024-12-06T10:11:56,707 INFO [M:0;552d6a33fa09:36447 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3ce1fab348cb4f8ba49ddf980161a0c1, entries=11, sequenceid=104, filesize=6.3 K 2024-12-06T10:11:56,708 DEBUG [M:0;552d6a33fa09:36447 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6a074fef8fd74c15a6eccb04078cebf1 as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6a074fef8fd74c15a6eccb04078cebf1 2024-12-06T10:11:56,715 INFO [M:0;552d6a33fa09:36447 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6a074fef8fd74c15a6eccb04078cebf1, entries=1, sequenceid=104, filesize=5.0 K 2024-12-06T10:11:56,716 DEBUG [M:0;552d6a33fa09:36447 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/54c5a76e97e74587b632c3a0958ce16e as hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/54c5a76e97e74587b632c3a0958ce16e 2024-12-06T10:11:56,723 INFO [M:0;552d6a33fa09:36447 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/54c5a76e97e74587b632c3a0958ce16e, entries=1, sequenceid=104, filesize=4.9 K 2024-12-06T10:11:56,724 INFO [M:0;552d6a33fa09:36447 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.21 KB/41173, heapSize ~50.08 KB/51280, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 598ms, sequenceid=104, compaction requested=false 2024-12-06T10:11:56,726 INFO [M:0;552d6a33fa09:36447 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T10:11:56,726 DEBUG [M:0;552d6a33fa09:36447 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:11:56,727 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/MasterData/WALs/552d6a33fa09,36447,1733479804672 2024-12-06T10:11:56,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42563 is added to blk_1073741830_1006 (size=48474) 2024-12-06T10:11:56,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41843 is added to blk_1073741830_1006 (size=48474) 2024-12-06T10:11:56,730 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-06T10:11:56,731 INFO [M:0;552d6a33fa09:36447 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-06T10:11:56,731 INFO [M:0;552d6a33fa09:36447 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36447 2024-12-06T10:11:56,733 DEBUG [M:0;552d6a33fa09:36447 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/552d6a33fa09,36447,1733479804672 already deleted, retry=false 2024-12-06T10:11:56,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:11:56,835 INFO [M:0;552d6a33fa09:36447 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,36447,1733479804672; zookeeper connection closed. 2024-12-06T10:11:56,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36447-0x10066ce64640000, quorum=127.0.0.1:49614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:11:56,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@163cfad6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:11:56,847 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f952caa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:11:56,847 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:11:56,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6eb1b261{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:11:56,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4debea22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/hadoop.log.dir/,STOPPED} 2024-12-06T10:11:56,850 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:11:56,850 WARN [BP-858773073-172.17.0.2-1733479801314 heartbeating to localhost/127.0.0.1:38219 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:11:56,851 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:11:56,851 WARN [BP-858773073-172.17.0.2-1733479801314 heartbeating to localhost/127.0.0.1:38219 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-858773073-172.17.0.2-1733479801314 (Datanode Uuid 16a6a190-58eb-4fa6-829b-64f750470630) service to localhost/127.0.0.1:38219 2024-12-06T10:11:56,852 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a/dfs/data/data3/current/BP-858773073-172.17.0.2-1733479801314 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:11:56,852 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a/dfs/data/data4/current/BP-858773073-172.17.0.2-1733479801314 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:11:56,853 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:11:56,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6aad8790{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:11:56,859 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@587d1dca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:11:56,859 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:11:56,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b4ce9e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:11:56,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2276bd44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/hadoop.log.dir/,STOPPED} 2024-12-06T10:11:56,861 WARN [BP-858773073-172.17.0.2-1733479801314 heartbeating to localhost/127.0.0.1:38219 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:11:56,861 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:11:56,861 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:11:56,861 WARN [BP-858773073-172.17.0.2-1733479801314 heartbeating to localhost/127.0.0.1:38219 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-858773073-172.17.0.2-1733479801314 (Datanode Uuid 0b98e0f6-d1f9-4dc9-acf4-5709e9f091c9) service to localhost/127.0.0.1:38219 2024-12-06T10:11:56,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a/dfs/data/data1/current/BP-858773073-172.17.0.2-1733479801314 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:11:56,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/cluster_87c68896-5857-2be3-30c5-0ba37489c94a/dfs/data/data2/current/BP-858773073-172.17.0.2-1733479801314 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:11:56,862 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:11:56,875 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5682c4d1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:11:56,876 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ff1a6c1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:11:56,876 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:11:56,877 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74468826{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:11:56,877 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@88aab13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/hadoop.log.dir/,STOPPED} 2024-12-06T10:11:56,887 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T10:11:56,933 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-06T10:11:56,944 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=61 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38219 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38219 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@1e7a9f39 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:38219 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38219 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38219 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: RS-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: regionserver/552d6a33fa09:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/552d6a33fa09:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:38219 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:38219 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/552d6a33fa09:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:38219 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=403 (was 286) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=296 (was 454), ProcessCount=13 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=7629 (was 8219) 2024-12-06T10:11:56,952 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=62, OpenFileDescriptor=403, MaxFileDescriptor=1048576, SystemLoadAverage=296, ProcessCount=11, AvailableMemoryMB=7628 2024-12-06T10:11:56,952 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T10:11:56,952 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/hadoop.log.dir so I do NOT create it in target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa 2024-12-06T10:11:56,952 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2290d52a-6ee2-1094-f45b-10bc4be255b6/hadoop.tmp.dir so I do NOT create it in target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa 2024-12-06T10:11:56,953 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee, deleteOnExit=true 2024-12-06T10:11:56,953 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T10:11:56,953 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/test.cache.data in system properties and HBase conf 2024-12-06T10:11:56,953 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T10:11:56,953 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir in system properties and HBase conf 2024-12-06T10:11:56,953 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T10:11:56,953 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T10:11:56,953 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T10:11:56,954 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-06T10:11:56,954 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:11:56,954 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:11:56,954 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T10:11:56,954 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:11:56,954 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T10:11:56,955 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T10:11:56,955 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting 
yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:11:56,955 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:11:56,955 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T10:11:56,955 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/nfs.dump.dir in system properties and HBase conf 2024-12-06T10:11:56,955 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/java.io.tmpdir in system properties and HBase conf 2024-12-06T10:11:56,955 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:11:56,955 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T10:11:56,955 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T10:11:56,975 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:11:57,050 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:11:57,056 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:11:57,060 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:11:57,060 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:11:57,060 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:11:57,061 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:11:57,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39523ff9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:11:57,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1101772a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:11:57,185 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@69d3d453{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/java.io.tmpdir/jetty-localhost-36929-hadoop-hdfs-3_4_1-tests_jar-_-any-17561454687658122874/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:11:57,186 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@52ca9eab{HTTP/1.1, (http/1.1)}{localhost:36929} 2024-12-06T10:11:57,186 INFO [Time-limited test {}] server.Server(415): Started @118477ms 2024-12-06T10:11:57,200 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:11:57,289 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:11:57,294 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:11:57,296 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:11:57,296 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:11:57,296 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:11:57,297 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e9304e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:11:57,297 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59774852{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:11:57,463 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b51cf53{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/java.io.tmpdir/jetty-localhost-35011-hadoop-hdfs-3_4_1-tests_jar-_-any-7537268565899655577/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:11:57,465 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a2efd9{HTTP/1.1, (http/1.1)}{localhost:35011} 2024-12-06T10:11:57,465 INFO [Time-limited test {}] server.Server(415): Started @118756ms 2024-12-06T10:11:57,467 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:11:57,522 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:11:57,532 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:11:57,540 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:11:57,540 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:11:57,540 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:11:57,543 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b628890{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:11:57,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29f7fced{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:11:57,600 WARN [Thread-449 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data1/current/BP-2117338848-172.17.0.2-1733479916995/current, will proceed with Du for space computation calculation, 2024-12-06T10:11:57,612 WARN [Thread-450 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data2/current/BP-2117338848-172.17.0.2-1733479916995/current, will proceed with Du for space computation calculation, 2024-12-06T10:11:57,680 WARN [Thread-428 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T10:11:57,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3e4664aee57de5bc with lease ID 0xb696fa3a183d7320: Processing first storage report for DS-012471da-beee-4a04-a715-f05c9493cd54 from datanode DatanodeRegistration(127.0.0.1:37505, datanodeUuid=0eda97cd-fa7c-4eb7-8b05-ca9e586e925b, infoPort=37499, infoSecurePort=0, ipcPort=46695, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995) 2024-12-06T10:11:57,685 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3e4664aee57de5bc with lease ID 0xb696fa3a183d7320: from storage DS-012471da-beee-4a04-a715-f05c9493cd54 node DatanodeRegistration(127.0.0.1:37505, datanodeUuid=0eda97cd-fa7c-4eb7-8b05-ca9e586e925b, infoPort=37499, infoSecurePort=0, ipcPort=46695, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:11:57,688 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3e4664aee57de5bc with lease ID 0xb696fa3a183d7320: Processing first storage report for DS-f0a81264-e6ba-4933-b130-db7af9d6b638 from datanode DatanodeRegistration(127.0.0.1:37505, datanodeUuid=0eda97cd-fa7c-4eb7-8b05-ca9e586e925b, infoPort=37499, infoSecurePort=0, ipcPort=46695, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995) 2024-12-06T10:11:57,688 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3e4664aee57de5bc with lease ID 0xb696fa3a183d7320: from storage DS-f0a81264-e6ba-4933-b130-db7af9d6b638 node DatanodeRegistration(127.0.0.1:37505, datanodeUuid=0eda97cd-fa7c-4eb7-8b05-ca9e586e925b, infoPort=37499, infoSecurePort=0, ipcPort=46695, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:11:57,704 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c586cce{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/java.io.tmpdir/jetty-localhost-45979-hadoop-hdfs-3_4_1-tests_jar-_-any-10534293817685126119/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:11:57,705 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d899dad{HTTP/1.1, (http/1.1)}{localhost:45979} 2024-12-06T10:11:57,707 INFO [Time-limited test {}] server.Server(415): Started @118998ms 2024-12-06T10:11:57,710 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
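[Editorial aside, not part of the captured log.] The ResourceChecker header earlier in this section shows the test (regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath) requesting a minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, createRootDir=false, createWALDir=false}, which is what produces the DFS, ZooKeeper, Jetty and datanode startup entries around this point. As a rough illustration only, a test typically requests such a cluster along the lines of the sketch below; the builder/method names are assumed from the option fields echoed in the log and from the public HBaseTestingUtility API, not taken from this log itself.

```java
// Illustrative sketch only; assumes the org.apache.hadoop.hbase.HBaseTestingUtility
// and StartMiniClusterOption APIs whose option fields appear in the
// "Starting up minicluster with option: ..." entry above.
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();

    // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1,
    // numDataNodes=2, numZkServers=1, createRootDir=false, createWALDir=false}
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();

    // Starts mini DFS, mini ZooKeeper, one master and one region server,
    // producing startup output similar to the surrounding log entries.
    util.startMiniCluster(option);
    try {
      // ... exercise the cluster (e.g. write to a table, roll WALs, stop a datanode) ...
    } finally {
      util.shutdownMiniCluster(); // tears down the cluster and cleans the test data dirs
    }
  }
}
```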
2024-12-06T10:11:57,853 WARN [Thread-475 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data3/current/BP-2117338848-172.17.0.2-1733479916995/current, will proceed with Du for space computation calculation, 2024-12-06T10:11:57,855 WARN [Thread-476 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data4/current/BP-2117338848-172.17.0.2-1733479916995/current, will proceed with Du for space computation calculation, 2024-12-06T10:11:57,897 WARN [Thread-464 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T10:11:57,901 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd244ad392f9dff7f with lease ID 0xb696fa3a183d7321: Processing first storage report for DS-ab367aac-e619-482e-8f82-86fdb4bfa97c from datanode DatanodeRegistration(127.0.0.1:39641, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=41643, infoSecurePort=0, ipcPort=34417, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995) 2024-12-06T10:11:57,901 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd244ad392f9dff7f with lease ID 0xb696fa3a183d7321: from storage DS-ab367aac-e619-482e-8f82-86fdb4bfa97c node DatanodeRegistration(127.0.0.1:39641, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=41643, infoSecurePort=0, ipcPort=34417, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:11:57,901 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd244ad392f9dff7f with lease ID 0xb696fa3a183d7321: Processing first storage report for DS-aec44971-df45-419f-8221-dffb6a780dfe from datanode DatanodeRegistration(127.0.0.1:39641, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=41643, infoSecurePort=0, ipcPort=34417, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995) 2024-12-06T10:11:57,901 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd244ad392f9dff7f with lease ID 0xb696fa3a183d7321: from storage DS-aec44971-df45-419f-8221-dffb6a780dfe node DatanodeRegistration(127.0.0.1:39641, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=41643, infoSecurePort=0, ipcPort=34417, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:11:57,986 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa 2024-12-06T10:11:57,996 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/zookeeper_0, clientPort=52351, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T10:11:57,998 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=52351 2024-12-06T10:11:57,998 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:11:58,000 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:11:58,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37505 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:11:58,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:11:58,019 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565 with version=8 2024-12-06T10:11:58,019 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/hbase-staging 2024-12-06T10:11:58,023 INFO [Time-limited test {}] client.ConnectionUtils(129): master/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:11:58,023 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:11:58,023 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:11:58,023 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:11:58,023 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:11:58,023 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:11:58,024 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:11:58,024 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:11:58,028 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45929 2024-12-06T10:11:58,028 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:11:58,030 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:11:58,033 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:45929 connecting to ZooKeeper ensemble=127.0.0.1:52351 2024-12-06T10:11:58,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:459290x0, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:11:58,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45929-0x10066d0228b0000 connected 2024-12-06T10:11:58,067 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:11:58,069 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:11:58,069 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:11:58,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45929 2024-12-06T10:11:58,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45929 2024-12-06T10:11:58,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45929 2024-12-06T10:11:58,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45929 2024-12-06T10:11:58,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45929 2024-12-06T10:11:58,079 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565, hbase.cluster.distributed=false 2024-12-06T10:11:58,099 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:11:58,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:11:58,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:11:58,099 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:11:58,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:11:58,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:11:58,100 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:11:58,100 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:11:58,104 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:39435 2024-12-06T10:11:58,105 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T10:11:58,108 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T10:11:58,109 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:11:58,112 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:11:58,117 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:39435 connecting to ZooKeeper ensemble=127.0.0.1:52351 2024-12-06T10:11:58,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:394350x0, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:11:58,139 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:394350x0, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:11:58,140 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39435-0x10066d0228b0001 connected 2024-12-06T10:11:58,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:11:58,141 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:11:58,160 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39435 2024-12-06T10:11:58,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39435 2024-12-06T10:11:58,188 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39435 2024-12-06T10:11:58,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39435 2024-12-06T10:11:58,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39435 2024-12-06T10:11:58,202 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/552d6a33fa09,45929,1733479918022 2024-12-06T10:11:58,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:11:58,210 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/552d6a33fa09,45929,1733479918022 2024-12-06T10:11:58,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:11:58,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:11:58,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:11:58,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,214 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:11:58,214 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/552d6a33fa09,45929,1733479918022 from backup master directory 2024-12-06T10:11:58,215 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:11:58,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/552d6a33fa09,45929,1733479918022 2024-12-06T10:11:58,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:11:58,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:11:58,217 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T10:11:58,217 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=552d6a33fa09,45929,1733479918022 2024-12-06T10:11:58,221 DEBUG [M:0;552d6a33fa09:45929 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;552d6a33fa09:45929 2024-12-06T10:11:58,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:11:58,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37505 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:11:58,254 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/hbase.id with ID: 76547cdf-3d23-4ad5-b4e0-9a755aff2b93 2024-12-06T10:11:58,278 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:11:58,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37505 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:11:58,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:11:58,320 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
{NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:11:58,322 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T10:11:58,324 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:11:58,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37505 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:11:58,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:11:58,344 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store 2024-12-06T10:11:58,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37505 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:11:58,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:11:58,359 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:11:58,360 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:11:58,360 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:11:58,360 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:11:58,360 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:11:58,360 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:11:58,360 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:11:58,360 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:11:58,361 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/.initializing 2024-12-06T10:11:58,361 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022 2024-12-06T10:11:58,365 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C45929%2C1733479918022, suffix=, logDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022, archiveDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/oldWALs, maxLogs=10 2024-12-06T10:11:58,366 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45929%2C1733479918022.1733479918366 2024-12-06T10:11:58,379 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022/552d6a33fa09%2C45929%2C1733479918022.1733479918366 2024-12-06T10:11:58,380 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41643:41643),(127.0.0.1/127.0.0.1:37499:37499)] 2024-12-06T10:11:58,380 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:11:58,380 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:11:58,380 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:11:58,380 DEBUG [master/552d6a33fa09:0:becomeActiveMaster 
{}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:11:58,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:11:58,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T10:11:58,386 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:58,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:11:58,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:11:58,389 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T10:11:58,389 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:58,390 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:11:58,390 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:11:58,392 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T10:11:58,392 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:58,393 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:11:58,393 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:11:58,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T10:11:58,396 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:58,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:11:58,397 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:11:58,398 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:11:58,401 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T10:11:58,403 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:11:58,406 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:11:58,407 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753267, jitterRate=-0.04217243194580078}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T10:11:58,409 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:11:58,409 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T10:11:58,417 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@282e90e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:11:58,418 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T10:11:58,419 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T10:11:58,419 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T10:11:58,419 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
2024-12-06T10:11:58,420 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T10:11:58,420 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-06T10:11:58,420 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T10:11:58,423 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T10:11:58,424 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T10:11:58,425 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T10:11:58,426 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T10:11:58,427 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T10:11:58,429 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T10:11:58,429 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T10:11:58,432 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T10:11:58,434 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T10:11:58,435 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T10:11:58,436 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T10:11:58,438 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T10:11:58,439 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T10:11:58,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-06T10:11:58,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T10:11:58,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,442 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=552d6a33fa09,45929,1733479918022, sessionid=0x10066d0228b0000, setting cluster-up flag (Was=false) 2024-12-06T10:11:58,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,450 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T10:11:58,452 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,45929,1733479918022 2024-12-06T10:11:58,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,466 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T10:11:58,468 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,45929,1733479918022 2024-12-06T10:11:58,471 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T10:11:58,471 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T10:11:58,471 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, 
RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T10:11:58,471 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 552d6a33fa09,45929,1733479918022 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T10:11:58,472 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:11:58,472 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:11:58,472 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:11:58,472 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:11:58,472 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/552d6a33fa09:0, corePoolSize=10, maxPoolSize=10 2024-12-06T10:11:58,472 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,472 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:11:58,472 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,473 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733479948473 2024-12-06T10:11:58,473 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T10:11:58,473 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T10:11:58,473 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T10:11:58,473 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T10:11:58,473 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T10:11:58,473 INFO 
[master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T10:11:58,474 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:58,474 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:11:58,474 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T10:11:58,474 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T10:11:58,474 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T10:11:58,474 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T10:11:58,475 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T10:11:58,475 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T10:11:58,475 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733479918475,5,FailOnTimeoutGroup] 2024-12-06T10:11:58,475 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733479918475,5,FailOnTimeoutGroup] 2024-12-06T10:11:58,475 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:58,475 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:58,476 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T10:11:58,476 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-12-06T10:11:58,476 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:11:58,476 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:58,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:11:58,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37505 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:11:58,484 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T10:11:58,485 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565 2024-12-06T10:11:58,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:11:58,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37505 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:11:58,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:11:58,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:11:58,496 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:11:58,496 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:58,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:11:58,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:11:58,499 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:11:58,499 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T10:11:58,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:11:58,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:11:58,501 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:11:58,501 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:58,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:11:58,502 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/hbase/meta/1588230740 2024-12-06T10:11:58,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/hbase/meta/1588230740 2024-12-06T10:11:58,505 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-06T10:11:58,506 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:11:58,509 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:11:58,509 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811925, jitterRate=0.03241679072380066}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:11:58,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:11:58,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:11:58,511 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:11:58,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:11:58,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:11:58,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:11:58,511 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T10:11:58,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:11:58,513 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:11:58,513 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T10:11:58,513 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T10:11:58,514 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T10:11:58,515 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T10:11:58,520 DEBUG [RS:0;552d6a33fa09:39435 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;552d6a33fa09:39435 2024-12-06T10:11:58,521 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1008): ClusterId : 76547cdf-3d23-4ad5-b4e0-9a755aff2b93 2024-12-06T10:11:58,521 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T10:11:58,523 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T10:11:58,523 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T10:11:58,525 DEBUG 
[RS:0;552d6a33fa09:39435 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T10:11:58,525 DEBUG [RS:0;552d6a33fa09:39435 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e139f98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:11:58,526 DEBUG [RS:0;552d6a33fa09:39435 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35a76682, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:11:58,526 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T10:11:58,526 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T10:11:58,526 DEBUG [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-06T10:11:58,526 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(3073): reportForDuty to master=552d6a33fa09,45929,1733479918022 with isa=552d6a33fa09/172.17.0.2:39435, startcode=1733479918098 2024-12-06T10:11:58,527 DEBUG [RS:0;552d6a33fa09:39435 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:11:58,542 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39051, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:11:58,543 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45929 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 552d6a33fa09,39435,1733479918098 2024-12-06T10:11:58,543 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45929 {}] master.ServerManager(486): Registering regionserver=552d6a33fa09,39435,1733479918098 2024-12-06T10:11:58,545 DEBUG [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565 2024-12-06T10:11:58,545 DEBUG [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:45977 2024-12-06T10:11:58,545 DEBUG [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T10:11:58,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:11:58,548 DEBUG [RS:0;552d6a33fa09:39435 {}] zookeeper.ZKUtil(111): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/552d6a33fa09,39435,1733479918098 2024-12-06T10:11:58,549 WARN [RS:0;552d6a33fa09:39435 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T10:11:58,549 INFO [RS:0;552d6a33fa09:39435 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:11:58,549 DEBUG [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098 2024-12-06T10:11:58,549 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [552d6a33fa09,39435,1733479918098] 2024-12-06T10:11:58,554 DEBUG [RS:0;552d6a33fa09:39435 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T10:11:58,554 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T10:11:58,557 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T10:11:58,557 INFO [RS:0;552d6a33fa09:39435 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T10:11:58,557 INFO [RS:0;552d6a33fa09:39435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:58,558 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T10:11:58,559 INFO [RS:0;552d6a33fa09:39435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T10:11:58,560 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,560 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,560 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,560 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,560 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,560 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:11:58,560 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,560 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,560 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,560 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,560 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:58,561 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:11:58,561 DEBUG [RS:0;552d6a33fa09:39435 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:11:58,564 INFO [RS:0;552d6a33fa09:39435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:58,564 INFO [RS:0;552d6a33fa09:39435 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:58,564 INFO [RS:0;552d6a33fa09:39435 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:58,564 INFO [RS:0;552d6a33fa09:39435 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:58,564 INFO [RS:0;552d6a33fa09:39435 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,39435,1733479918098-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T10:11:58,585 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T10:11:58,585 INFO [RS:0;552d6a33fa09:39435 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,39435,1733479918098-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:58,602 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.Replication(204): 552d6a33fa09,39435,1733479918098 started 2024-12-06T10:11:58,602 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1767): Serving as 552d6a33fa09,39435,1733479918098, RpcServer on 552d6a33fa09/172.17.0.2:39435, sessionid=0x10066d0228b0001 2024-12-06T10:11:58,602 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T10:11:58,602 DEBUG [RS:0;552d6a33fa09:39435 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 552d6a33fa09,39435,1733479918098 2024-12-06T10:11:58,602 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,39435,1733479918098' 2024-12-06T10:11:58,602 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T10:11:58,603 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T10:11:58,604 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T10:11:58,604 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T10:11:58,604 DEBUG [RS:0;552d6a33fa09:39435 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 552d6a33fa09,39435,1733479918098 2024-12-06T10:11:58,604 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,39435,1733479918098' 2024-12-06T10:11:58,604 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T10:11:58,604 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T10:11:58,604 DEBUG [RS:0;552d6a33fa09:39435 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T10:11:58,604 INFO [RS:0;552d6a33fa09:39435 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T10:11:58,604 INFO [RS:0;552d6a33fa09:39435 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T10:11:58,666 WARN [552d6a33fa09:45929 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-06T10:11:58,707 INFO [RS:0;552d6a33fa09:39435 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C39435%2C1733479918098, suffix=, logDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098, archiveDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/oldWALs, maxLogs=32 2024-12-06T10:11:58,709 INFO [RS:0;552d6a33fa09:39435 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C39435%2C1733479918098.1733479918709 2024-12-06T10:11:58,720 INFO [RS:0;552d6a33fa09:39435 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 2024-12-06T10:11:58,720 DEBUG [RS:0;552d6a33fa09:39435 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37499:37499),(127.0.0.1/127.0.0.1:41643:41643)] 2024-12-06T10:11:58,916 DEBUG [552d6a33fa09:45929 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T10:11:58,916 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=552d6a33fa09,39435,1733479918098 2024-12-06T10:11:58,918 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,39435,1733479918098, state=OPENING 2024-12-06T10:11:58,920 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T10:11:58,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:58,922 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:11:58,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=552d6a33fa09,39435,1733479918098}] 2024-12-06T10:11:58,922 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:11:59,076 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,39435,1733479918098 2024-12-06T10:11:59,076 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T10:11:59,079 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53818, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T10:11:59,083 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T10:11:59,084 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:11:59,086 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C39435%2C1733479918098.meta, suffix=.meta, logDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098, archiveDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/oldWALs, maxLogs=32 2024-12-06T10:11:59,089 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta 2024-12-06T10:11:59,099 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta 2024-12-06T10:11:59,099 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37499:37499),(127.0.0.1/127.0.0.1:41643:41643)] 2024-12-06T10:11:59,099 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:11:59,100 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T10:11:59,100 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T10:11:59,100 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-06T10:11:59,100 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T10:11:59,100 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:11:59,101 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T10:11:59,101 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T10:11:59,103 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:11:59,104 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:11:59,104 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:59,105 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:11:59,105 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:11:59,106 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:11:59,106 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:59,107 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:11:59,107 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:11:59,108 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:11:59,108 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:59,108 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:11:59,109 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/hbase/meta/1588230740 2024-12-06T10:11:59,111 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/hbase/meta/1588230740 2024-12-06T10:11:59,113 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-06T10:11:59,115 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:11:59,116 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=819846, jitterRate=0.0424889475107193}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:11:59,118 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:11:59,119 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733479919076 2024-12-06T10:11:59,122 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T10:11:59,122 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T10:11:59,123 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,39435,1733479918098 2024-12-06T10:11:59,124 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,39435,1733479918098, state=OPEN 2024-12-06T10:11:59,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:11:59,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:11:59,128 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:11:59,128 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:11:59,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T10:11:59,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=552d6a33fa09,39435,1733479918098 in 206 msec 2024-12-06T10:11:59,136 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T10:11:59,136 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 620 msec 2024-12-06T10:11:59,139 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 668 msec 2024-12-06T10:11:59,139 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to 
report in: status=status unset, state=RUNNING, startTime=1733479919139, completionTime=-1 2024-12-06T10:11:59,139 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T10:11:59,139 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T10:11:59,140 DEBUG [hconnection-0x4f7af457-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:11:59,142 INFO [RS-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53824, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:11:59,143 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T10:11:59,143 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733479979143 2024-12-06T10:11:59,144 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733480039144 2024-12-06T10:11:59,144 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 4 msec 2024-12-06T10:11:59,152 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,45929,1733479918022-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:59,153 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,45929,1733479918022-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:59,153 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,45929,1733479918022-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:59,153 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-552d6a33fa09:45929, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:59,153 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:59,153 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-06T10:11:59,153 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:11:59,155 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T10:11:59,156 DEBUG [master/552d6a33fa09:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T10:11:59,157 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:11:59,157 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:59,158 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:11:59,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:11:59,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37505 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:11:59,170 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2d62f68be62f6454a46e6823da00beaf, NAME => 'hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565 2024-12-06T10:11:59,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:11:59,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37505 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:11:59,178 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:11:59,178 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 2d62f68be62f6454a46e6823da00beaf, disabling compactions & flushes 2024-12-06T10:11:59,178 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:11:59,178 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:11:59,179 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. after waiting 0 ms 2024-12-06T10:11:59,179 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:11:59,179 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:11:59,179 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2d62f68be62f6454a46e6823da00beaf: 2024-12-06T10:11:59,180 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:11:59,181 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733479919180"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733479919180"}]},"ts":"1733479919180"} 2024-12-06T10:11:59,183 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T10:11:59,185 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:11:59,185 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479919185"}]},"ts":"1733479919185"} 2024-12-06T10:11:59,187 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T10:11:59,192 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=2d62f68be62f6454a46e6823da00beaf, ASSIGN}] 2024-12-06T10:11:59,193 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=2d62f68be62f6454a46e6823da00beaf, ASSIGN 2024-12-06T10:11:59,195 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=2d62f68be62f6454a46e6823da00beaf, ASSIGN; state=OFFLINE, location=552d6a33fa09,39435,1733479918098; forceNewPlan=false, retain=false 2024-12-06T10:11:59,345 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=2d62f68be62f6454a46e6823da00beaf, regionState=OPENING, regionLocation=552d6a33fa09,39435,1733479918098 2024-12-06T10:11:59,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 2d62f68be62f6454a46e6823da00beaf, server=552d6a33fa09,39435,1733479918098}] 2024-12-06T10:11:59,501 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,39435,1733479918098 2024-12-06T10:11:59,506 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:11:59,507 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 2d62f68be62f6454a46e6823da00beaf, NAME => 'hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:11:59,507 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 2d62f68be62f6454a46e6823da00beaf 2024-12-06T10:11:59,507 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:11:59,508 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 2d62f68be62f6454a46e6823da00beaf 2024-12-06T10:11:59,508 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 2d62f68be62f6454a46e6823da00beaf 2024-12-06T10:11:59,510 INFO [StoreOpener-2d62f68be62f6454a46e6823da00beaf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2d62f68be62f6454a46e6823da00beaf 2024-12-06T10:11:59,512 INFO [StoreOpener-2d62f68be62f6454a46e6823da00beaf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d62f68be62f6454a46e6823da00beaf columnFamilyName info 2024-12-06T10:11:59,512 DEBUG [StoreOpener-2d62f68be62f6454a46e6823da00beaf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:59,512 INFO [StoreOpener-2d62f68be62f6454a46e6823da00beaf-1 {}] regionserver.HStore(327): Store=2d62f68be62f6454a46e6823da00beaf/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:11:59,514 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/hbase/namespace/2d62f68be62f6454a46e6823da00beaf 2024-12-06T10:11:59,514 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/hbase/namespace/2d62f68be62f6454a46e6823da00beaf 2024-12-06T10:11:59,517 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 2d62f68be62f6454a46e6823da00beaf 2024-12-06T10:11:59,520 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/hbase/namespace/2d62f68be62f6454a46e6823da00beaf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:11:59,521 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 2d62f68be62f6454a46e6823da00beaf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716436, jitterRate=-0.08900457620620728}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:11:59,523 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 2d62f68be62f6454a46e6823da00beaf: 2024-12-06T10:11:59,524 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf., pid=6, masterSystemTime=1733479919501 2024-12-06T10:11:59,527 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:11:59,527 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 
2024-12-06T10:11:59,527 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=2d62f68be62f6454a46e6823da00beaf, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,39435,1733479918098 2024-12-06T10:11:59,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T10:11:59,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 2d62f68be62f6454a46e6823da00beaf, server=552d6a33fa09,39435,1733479918098 in 182 msec 2024-12-06T10:11:59,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T10:11:59,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=2d62f68be62f6454a46e6823da00beaf, ASSIGN in 341 msec 2024-12-06T10:11:59,538 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:11:59,538 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479919538"}]},"ts":"1733479919538"} 2024-12-06T10:11:59,540 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T10:11:59,544 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:11:59,546 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 391 msec 2024-12-06T10:11:59,556 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T10:11:59,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:11:59,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:59,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:11:59,568 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T10:11:59,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:11:59,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 14 msec 2024-12-06T10:11:59,591 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T10:11:59,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:11:59,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-12-06T10:11:59,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T10:11:59,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T10:11:59,619 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.401sec 2024-12-06T10:11:59,619 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T10:11:59,620 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T10:11:59,620 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T10:11:59,620 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T10:11:59,620 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T10:11:59,620 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,45929,1733479918022-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T10:11:59,620 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,45929,1733479918022-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T10:11:59,626 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T10:11:59,626 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T10:11:59,626 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,45929,1733479918022-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T10:11:59,706 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62cd5620 to 127.0.0.1:52351 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15f71bfd 2024-12-06T10:11:59,710 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7faa2a92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:11:59,718 DEBUG [hconnection-0x6fb20afc-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:11:59,721 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53838, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:11:59,724 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=552d6a33fa09,45929,1733479918022 2024-12-06T10:11:59,725 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:11:59,728 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T10:11:59,747 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:11:59,748 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:11:59,748 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:11:59,748 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:11:59,748 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:11:59,748 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:11:59,748 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:11:59,748 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:11:59,749 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41575 2024-12-06T10:11:59,749 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T10:11:59,750 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T10:11:59,751 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:11:59,753 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:11:59,756 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41575 connecting to ZooKeeper ensemble=127.0.0.1:52351 2024-12-06T10:11:59,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:415750x0, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:11:59,759 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41575-0x10066d0228b0003, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:11:59,759 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41575-0x10066d0228b0003 connected 2024-12-06T10:11:59,760 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41575-0x10066d0228b0003, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-06T10:11:59,761 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41575-0x10066d0228b0003, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:11:59,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41575 2024-12-06T10:11:59,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41575 2024-12-06T10:11:59,770 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41575 2024-12-06T10:11:59,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41575 2024-12-06T10:11:59,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41575 2024-12-06T10:11:59,772 DEBUG [pool-282-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-06T10:11:59,792 DEBUG [RS:1;552d6a33fa09:41575 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;552d6a33fa09:41575 2024-12-06T10:11:59,793 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(1008): ClusterId : 76547cdf-3d23-4ad5-b4e0-9a755aff2b93 2024-12-06T10:11:59,793 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T10:11:59,796 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T10:11:59,796 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T10:11:59,798 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T10:11:59,798 DEBUG [RS:1;552d6a33fa09:41575 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2aa78894, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:11:59,799 DEBUG [RS:1;552d6a33fa09:41575 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@604e960c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:11:59,799 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T10:11:59,799 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T10:11:59,799 DEBUG [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-06T10:11:59,800 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(3073): reportForDuty to master=552d6a33fa09,45929,1733479918022 with isa=552d6a33fa09/172.17.0.2:41575, startcode=1733479919747 2024-12-06T10:11:59,800 DEBUG [RS:1;552d6a33fa09:41575 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:11:59,805 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56355, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:11:59,805 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45929 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 552d6a33fa09,41575,1733479919747 2024-12-06T10:11:59,806 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45929 {}] master.ServerManager(486): Registering regionserver=552d6a33fa09,41575,1733479919747 2024-12-06T10:11:59,807 DEBUG [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565 2024-12-06T10:11:59,807 DEBUG [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:45977 2024-12-06T10:11:59,808 DEBUG [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T10:11:59,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:11:59,810 DEBUG [RS:1;552d6a33fa09:41575 {}] zookeeper.ZKUtil(111): regionserver:41575-0x10066d0228b0003, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/552d6a33fa09,41575,1733479919747 2024-12-06T10:11:59,810 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [552d6a33fa09,41575,1733479919747] 2024-12-06T10:11:59,810 WARN [RS:1;552d6a33fa09:41575 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T10:11:59,811 INFO [RS:1;552d6a33fa09:41575 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:11:59,811 DEBUG [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,41575,1733479919747 2024-12-06T10:11:59,821 DEBUG [RS:1;552d6a33fa09:41575 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T10:11:59,822 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T10:11:59,825 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T10:11:59,828 INFO [RS:1;552d6a33fa09:41575 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T10:11:59,828 INFO [RS:1;552d6a33fa09:41575 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:59,828 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T10:11:59,830 INFO [RS:1;552d6a33fa09:41575 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:59,830 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:59,830 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:59,830 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:59,830 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:59,830 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:59,830 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:11:59,830 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:59,830 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:59,830 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:59,830 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:59,831 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:11:59,831 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:11:59,831 DEBUG [RS:1;552d6a33fa09:41575 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:11:59,833 INFO [RS:1;552d6a33fa09:41575 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:59,833 INFO [RS:1;552d6a33fa09:41575 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:59,833 INFO [RS:1;552d6a33fa09:41575 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:59,833 INFO [RS:1;552d6a33fa09:41575 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T10:11:59,833 INFO [RS:1;552d6a33fa09:41575 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,41575,1733479919747-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T10:11:59,860 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T10:11:59,860 INFO [RS:1;552d6a33fa09:41575 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,41575,1733479919747-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-06T10:11:59,884 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.Replication(204): 552d6a33fa09,41575,1733479919747 started 2024-12-06T10:11:59,884 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(1767): Serving as 552d6a33fa09,41575,1733479919747, RpcServer on 552d6a33fa09/172.17.0.2:41575, sessionid=0x10066d0228b0003 2024-12-06T10:11:59,884 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T10:11:59,884 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3355): Started new server=Thread[RS:1;552d6a33fa09:41575,5,FailOnTimeoutGroup] 2024-12-06T10:11:59,884 DEBUG [RS:1;552d6a33fa09:41575 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 552d6a33fa09,41575,1733479919747 2024-12-06T10:11:59,885 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,41575,1733479919747' 2024-12-06T10:11:59,885 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T10:11:59,885 INFO [Time-limited test {}] wal.TestLogRolling(191): Replication=2 2024-12-06T10:11:59,885 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T10:11:59,886 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:11:59,886 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T10:11:59,886 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T10:11:59,886 DEBUG [RS:1;552d6a33fa09:41575 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 552d6a33fa09,41575,1733479919747 2024-12-06T10:11:59,886 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,41575,1733479919747' 2024-12-06T10:11:59,886 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T10:11:59,887 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T10:11:59,887 DEBUG [RS:1;552d6a33fa09:41575 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T10:11:59,887 INFO [RS:1;552d6a33fa09:41575 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T10:11:59,887 INFO [RS:1;552d6a33fa09:41575 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T10:11:59,889 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43700, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:11:59,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45929 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 
2024-12-06T10:11:59,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45929 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-06T10:11:59,891 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45929 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:11:59,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45929 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-06T10:11:59,895 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:11:59,895 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:11:59,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45929 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 9 2024-12-06T10:11:59,896 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:11:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45929 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:11:59,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741837_1013 (size=393) 2024-12-06T10:11:59,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37505 is added to blk_1073741837_1013 (size=393) 2024-12-06T10:11:59,929 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3a2059af8836cf786145efe06400336b, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565 2024-12-06T10:11:59,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to 
blk_1073741838_1014 (size=76) 2024-12-06T10:11:59,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37505 is added to blk_1073741838_1014 (size=76) 2024-12-06T10:11:59,941 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:11:59,941 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1681): Closing 3a2059af8836cf786145efe06400336b, disabling compactions & flushes 2024-12-06T10:11:59,941 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 2024-12-06T10:11:59,941 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 2024-12-06T10:11:59,941 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. after waiting 0 ms 2024-12-06T10:11:59,941 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 2024-12-06T10:11:59,941 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 2024-12-06T10:11:59,941 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3a2059af8836cf786145efe06400336b: 2024-12-06T10:11:59,943 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:11:59,943 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733479919943"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733479919943"}]},"ts":"1733479919943"} 2024-12-06T10:11:59,946 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-06T10:11:59,947 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:11:59,948 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479919948"}]},"ts":"1733479919948"} 2024-12-06T10:11:59,950 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-06T10:11:59,954 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=3a2059af8836cf786145efe06400336b, ASSIGN}] 2024-12-06T10:11:59,956 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=3a2059af8836cf786145efe06400336b, ASSIGN 2024-12-06T10:11:59,957 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=3a2059af8836cf786145efe06400336b, ASSIGN; state=OFFLINE, location=552d6a33fa09,39435,1733479918098; forceNewPlan=false, retain=false 2024-12-06T10:11:59,990 INFO [RS:1;552d6a33fa09:41575 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C41575%2C1733479919747, suffix=, logDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,41575,1733479919747, archiveDir=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/oldWALs, maxLogs=32 2024-12-06T10:11:59,991 INFO [RS:1;552d6a33fa09:41575 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C41575%2C1733479919747.1733479919991 2024-12-06T10:12:00,002 INFO [RS:1;552d6a33fa09:41575 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,41575,1733479919747/552d6a33fa09%2C41575%2C1733479919747.1733479919991 2024-12-06T10:12:00,002 DEBUG [RS:1;552d6a33fa09:41575 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41643:41643),(127.0.0.1/127.0.0.1:37499:37499)] 2024-12-06T10:12:00,110 INFO [552d6a33fa09:45929 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-06T10:12:00,110 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=3a2059af8836cf786145efe06400336b, regionState=OPENING, regionLocation=552d6a33fa09,39435,1733479918098 2024-12-06T10:12:00,114 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 3a2059af8836cf786145efe06400336b, server=552d6a33fa09,39435,1733479918098}] 2024-12-06T10:12:00,267 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,39435,1733479918098 2024-12-06T10:12:00,272 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 2024-12-06T10:12:00,272 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 3a2059af8836cf786145efe06400336b, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:12:00,273 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 3a2059af8836cf786145efe06400336b 2024-12-06T10:12:00,273 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:12:00,273 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 3a2059af8836cf786145efe06400336b 2024-12-06T10:12:00,273 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 3a2059af8836cf786145efe06400336b 2024-12-06T10:12:00,275 INFO [StoreOpener-3a2059af8836cf786145efe06400336b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3a2059af8836cf786145efe06400336b 2024-12-06T10:12:00,277 INFO [StoreOpener-3a2059af8836cf786145efe06400336b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3a2059af8836cf786145efe06400336b columnFamilyName info 2024-12-06T10:12:00,278 DEBUG [StoreOpener-3a2059af8836cf786145efe06400336b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:00,278 INFO [StoreOpener-3a2059af8836cf786145efe06400336b-1 {}] regionserver.HStore(327): Store=3a2059af8836cf786145efe06400336b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:12:00,279 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b 2024-12-06T10:12:00,279 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b 2024-12-06T10:12:00,282 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 3a2059af8836cf786145efe06400336b 2024-12-06T10:12:00,284 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:12:00,285 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 3a2059af8836cf786145efe06400336b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859451, jitterRate=0.0928485095500946}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:12:00,286 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 3a2059af8836cf786145efe06400336b: 2024-12-06T10:12:00,287 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b., pid=11, masterSystemTime=1733479920267 2024-12-06T10:12:00,289 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 2024-12-06T10:12:00,289 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 
2024-12-06T10:12:00,290 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=3a2059af8836cf786145efe06400336b, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,39435,1733479918098 2024-12-06T10:12:00,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T10:12:00,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 3a2059af8836cf786145efe06400336b, server=552d6a33fa09,39435,1733479918098 in 178 msec 2024-12-06T10:12:00,297 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T10:12:00,297 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=3a2059af8836cf786145efe06400336b, ASSIGN in 340 msec 2024-12-06T10:12:00,298 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:12:00,298 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479920298"}]},"ts":"1733479920298"} 2024-12-06T10:12:00,300 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-06T10:12:00,303 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:12:00,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 412 msec 2024-12-06T10:12:00,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:00,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:01,318 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T10:12:01,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:01,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:04,554 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T10:12:04,555 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-06T10:12:04,556 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-06T10:12:05,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-06T10:12:05,196 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-06T10:12:05,197 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-06T10:12:09,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45929 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:12:09,899 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath, procId: 9 completed 2024-12-06T10:12:09,903 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-06T10:12:09,903 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 2024-12-06T10:12:09,933 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:12:09,938 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:12:09,940 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:12:09,940 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:12:09,940 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:12:09,944 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a8a53d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:12:09,945 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a5b3132{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:12:10,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49dbde5b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/java.io.tmpdir/jetty-localhost-45341-hadoop-hdfs-3_4_1-tests_jar-_-any-15156114478706593194/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:10,064 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10289dc3{HTTP/1.1, (http/1.1)}{localhost:45341} 2024-12-06T10:12:10,064 INFO [Time-limited test {}] server.Server(415): Started @131356ms 2024-12-06T10:12:10,066 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:12:10,103 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:12:10,108 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:12:10,109 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:12:10,109 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:12:10,109 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:12:10,110 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b6caab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:12:10,110 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@493e02d8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:12:10,167 WARN [Thread-631 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data5/current/BP-2117338848-172.17.0.2-1733479916995/current, will proceed with Du for space computation calculation, 2024-12-06T10:12:10,167 WARN [Thread-632 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data6/current/BP-2117338848-172.17.0.2-1733479916995/current, will proceed with Du for space computation calculation, 2024-12-06T10:12:10,187 WARN [Thread-611 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T10:12:10,190 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc135b74de77ad06 with lease ID 0xb696fa3a183d7322: Processing first storage report for DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d from datanode DatanodeRegistration(127.0.0.1:40795, datanodeUuid=83d8a4dc-b5f3-47a2-bd13-ec27cee13f76, infoPort=37643, infoSecurePort=0, ipcPort=41417, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995) 2024-12-06T10:12:10,190 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc135b74de77ad06 with lease ID 0xb696fa3a183d7322: from storage DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d node DatanodeRegistration(127.0.0.1:40795, datanodeUuid=83d8a4dc-b5f3-47a2-bd13-ec27cee13f76, infoPort=37643, infoSecurePort=0, ipcPort=41417, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:12:10,190 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc135b74de77ad06 with lease ID 0xb696fa3a183d7322: Processing first storage report for DS-32eeb4a8-20e4-4238-9c86-770ad215923c from datanode DatanodeRegistration(127.0.0.1:40795, datanodeUuid=83d8a4dc-b5f3-47a2-bd13-ec27cee13f76, infoPort=37643, infoSecurePort=0, ipcPort=41417, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995) 2024-12-06T10:12:10,190 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc135b74de77ad06 with lease ID 0xb696fa3a183d7322: from storage DS-32eeb4a8-20e4-4238-9c86-770ad215923c node DatanodeRegistration(127.0.0.1:40795, datanodeUuid=83d8a4dc-b5f3-47a2-bd13-ec27cee13f76, infoPort=37643, infoSecurePort=0, ipcPort=41417, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:12:10,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67e0ea44{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/java.io.tmpdir/jetty-localhost-44069-hadoop-hdfs-3_4_1-tests_jar-_-any-2292255149592347131/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:10,239 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64f4bc9a{HTTP/1.1, (http/1.1)}{localhost:44069} 2024-12-06T10:12:10,239 INFO [Time-limited test {}] server.Server(415): Started @131530ms 2024-12-06T10:12:10,240 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:12:10,288 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T10:12:10,291 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:10,315 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:10,327 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:12:10,331 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:12:10,331 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:12:10,331 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:12:10,332 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:12:10,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@186aeb49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:12:10,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f2fc02f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:12:10,335 WARN [Thread-667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data7/current/BP-2117338848-172.17.0.2-1733479916995/current, will proceed with Du for space computation calculation, 2024-12-06T10:12:10,335 WARN [Thread-668 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data8/current/BP-2117338848-172.17.0.2-1733479916995/current, will proceed with Du for space computation calculation, 2024-12-06T10:12:10,361 WARN [Thread-646 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T10:12:10,364 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbebbca9a30806b5c with lease ID 0xb696fa3a183d7323: Processing first storage report for DS-169570d1-0098-4e00-88ba-a3e63ca92f68 from datanode DatanodeRegistration(127.0.0.1:42781, datanodeUuid=51d9ec99-954d-41d9-a2fa-057b2221b6e0, infoPort=41325, infoSecurePort=0, ipcPort=36361, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995) 2024-12-06T10:12:10,365 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbebbca9a30806b5c with lease ID 0xb696fa3a183d7323: from storage DS-169570d1-0098-4e00-88ba-a3e63ca92f68 node DatanodeRegistration(127.0.0.1:42781, datanodeUuid=51d9ec99-954d-41d9-a2fa-057b2221b6e0, infoPort=41325, infoSecurePort=0, ipcPort=36361, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:12:10,365 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbebbca9a30806b5c with lease ID 0xb696fa3a183d7323: Processing first storage report for DS-3da4bcc7-8e92-4cb4-8a8d-4c9a6ed65187 from datanode DatanodeRegistration(127.0.0.1:42781, datanodeUuid=51d9ec99-954d-41d9-a2fa-057b2221b6e0, infoPort=41325, infoSecurePort=0, ipcPort=36361, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995) 2024-12-06T10:12:10,365 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbebbca9a30806b5c with lease ID 0xb696fa3a183d7323: from storage DS-3da4bcc7-8e92-4cb4-8a8d-4c9a6ed65187 node DatanodeRegistration(127.0.0.1:42781, datanodeUuid=51d9ec99-954d-41d9-a2fa-057b2221b6e0, infoPort=41325, infoSecurePort=0, ipcPort=36361, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:12:10,451 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59c9f3f9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/java.io.tmpdir/jetty-localhost-36203-hadoop-hdfs-3_4_1-tests_jar-_-any-3320302343443572616/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:10,451 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@63c7640d{HTTP/1.1, (http/1.1)}{localhost:36203} 2024-12-06T10:12:10,451 INFO [Time-limited test {}] server.Server(415): Started @131743ms 2024-12-06T10:12:10,453 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
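At this point additional datanodes have registered and sent their first block reports; the test name implies that one of the original pipeline datanodes is then stopped, which is what the WAL pipeline failures below reflect. A minimal sketch of that step against a MiniDFSCluster is given here; the cluster handle and the index 0 are placeholders for whatever the test actually uses.

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public final class StopDataNodeSketch {
      // Sketch only: stop one datanode of a running MiniDFSCluster so that open
      // WAL write pipelines hit a dead node, as the entries below then report.
      static MiniDFSCluster.DataNodeProperties killOneDataNode(MiniDFSCluster dfsCluster)
          throws Exception {
        return dfsCluster.stopDataNode(0);
      }
    }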
2024-12-06T10:12:10,545 WARN [Thread-693 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data9/current/BP-2117338848-172.17.0.2-1733479916995/current, will proceed with Du for space computation calculation, 2024-12-06T10:12:10,545 WARN [Thread-694 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data10/current/BP-2117338848-172.17.0.2-1733479916995/current, will proceed with Du for space computation calculation, 2024-12-06T10:12:10,569 WARN [Thread-682 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T10:12:10,572 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3c603a14d1c1eef with lease ID 0xb696fa3a183d7324: Processing first storage report for DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75 from datanode DatanodeRegistration(127.0.0.1:33213, datanodeUuid=320f8d05-d9a2-4226-ad25-c226aac44fe2, infoPort=33565, infoSecurePort=0, ipcPort=44009, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995) 2024-12-06T10:12:10,572 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3c603a14d1c1eef with lease ID 0xb696fa3a183d7324: from storage DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75 node DatanodeRegistration(127.0.0.1:33213, datanodeUuid=320f8d05-d9a2-4226-ad25-c226aac44fe2, infoPort=33565, infoSecurePort=0, ipcPort=44009, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:12:10,572 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3c603a14d1c1eef with lease ID 0xb696fa3a183d7324: Processing first storage report for DS-c5f1dba6-4acc-42b9-a819-6e1bfce9e7c9 from datanode DatanodeRegistration(127.0.0.1:33213, datanodeUuid=320f8d05-d9a2-4226-ad25-c226aac44fe2, infoPort=33565, infoSecurePort=0, ipcPort=44009, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995) 2024-12-06T10:12:10,573 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3c603a14d1c1eef with lease ID 0xb696fa3a183d7324: from storage DS-c5f1dba6-4acc-42b9-a819-6e1bfce9e7c9 node DatanodeRegistration(127.0.0.1:33213, datanodeUuid=320f8d05-d9a2-4226-ad25-c226aac44fe2, infoPort=33565, infoSecurePort=0, ipcPort=44009, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:12:10,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c586cce{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:10,582 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d899dad{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:12:10,582 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:12:10,582 INFO 
[Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29f7fced{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:12:10,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b628890{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,STOPPED} 2024-12-06T10:12:10,579 WARN [ResponseProcessor for block BP-2117338848-172.17.0.2-1733479916995:blk_1073741839_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2117338848-172.17.0.2-1733479916995:blk_1073741839_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:10,579 WARN [ResponseProcessor for block BP-2117338848-172.17.0.2-1733479916995:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2117338848-172.17.0.2-1733479916995:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:10,579 WARN [ResponseProcessor for block BP-2117338848-172.17.0.2-1733479916995:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2117338848-172.17.0.2-1733479916995:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-2117338848-172.17.0.2-1733479916995:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:10,584 WARN [DataStreamer for file /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022/552d6a33fa09%2C45929%2C1733479918022.1733479918366 block BP-2117338848-172.17.0.2-1733479916995:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK], DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]) is bad. 
2024-12-06T10:12:10,584 WARN [DataStreamer for file /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta block BP-2117338848-172.17.0.2-1733479916995:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]) is bad. 2024-12-06T10:12:10,585 WARN [DataStreamer for file /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,41575,1733479919747/552d6a33fa09%2C41575%2C1733479919747.1733479919991 block BP-2117338848-172.17.0.2-1733479916995:blk_1073741839_1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741839_1015 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK], DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]) is bad. 2024-12-06T10:12:10,579 WARN [ResponseProcessor for block BP-2117338848-172.17.0.2-1733479916995:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2117338848-172.17.0.2-1733479916995:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-2117338848-172.17.0.2-1733479916995:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:10,585 WARN [DataStreamer for file /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 block BP-2117338848-172.17.0.2-1733479916995:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]) is bad. 2024-12-06T10:12:10,586 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_846066635_22 at /127.0.0.1:44482 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44482 dst: /127.0.0.1:37505 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:10,585 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_846066635_22 at /127.0.0.1:51090 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39641:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51090 dst: /127.0.0.1:39641 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49720 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:10,587 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:12:10,587 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T10:12:10,586 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:51118 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39641:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51118 dst: /127.0.0.1:39641 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49715 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:10,587 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2117338848-172.17.0.2-1733479916995 (Datanode Uuid 0f76445e-e290-4916-b442-643b7633d531) service to localhost/127.0.0.1:45977 2024-12-06T10:12:10,587 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:12:10,586 WARN [PacketResponder: BP-2117338848-172.17.0.2-1733479916995:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39641] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:10,585 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:51112 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39641:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51112 dst: /127.0.0.1:39641 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49703 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:10,585 WARN [PacketResponder: BP-2117338848-172.17.0.2-1733479916995:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39641] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:10,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data3/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:10,588 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:44520 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44520 dst: /127.0.0.1:37505 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:10,587 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1722598719_22 at /127.0.0.1:51164 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:39641:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51164 dst: /127.0.0.1:39641 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49415 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:10,586 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1722598719_22 at /127.0.0.1:44576 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:37505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44576 dst: /127.0.0.1:37505 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:10,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data4/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:10,589 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:44508 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44508 dst: /127.0.0.1:37505 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:10,589 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:12:10,592 WARN [DataStreamer for file /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022/552d6a33fa09%2C45929%2C1733479918022.1733479918366 block BP-2117338848-172.17.0.2-1733479916995:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:10,592 WARN [DataStreamer for file /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 block BP-2117338848-172.17.0.2-1733479916995:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:10,592 WARN [DataStreamer for file /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta block BP-2117338848-172.17.0.2-1733479916995:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:12:10,592 WARN [DataStreamer for file /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,41575,1733479919747/552d6a33fa09%2C41575%2C1733479919747.1733479919991 block BP-2117338848-172.17.0.2-1733479916995:blk_1073741839_1015 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1015 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:10,597 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b51cf53{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:10,597 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a2efd9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:12:10,597 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:12:10,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59774852{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:12:10,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e9304e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,STOPPED} 2024-12-06T10:12:10,599 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:12:10,599 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
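The Jetty "Stopped ..." and "Ending block pool service" lines above correspond to a datanode being shut down while WAL blocks are still open on it, which is the whole point of a test like TestLogRolling#testLogRollOnDatanodeDeath. A minimal sketch of the triggering step, assuming a running MiniDFSCluster (the stop call is real Hadoop test API as far as I know; the surrounding class and method names are illustrative):

import org.apache.hadoop.hdfs.MiniDFSCluster;

// Stopping a datanode ends its Jetty contexts, its BPServiceActor and its command
// processor, producing the shutdown records above, and it leaves the open WAL
// pipeline pointing at a dead node.
final class StopDataNodeSketch {
  static void killFirstDataNode(MiniDFSCluster dfsCluster) {
    dfsCluster.stopDataNode(0);   // index of the datanode to stop
  }
}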
2024-12-06T10:12:10,599 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2117338848-172.17.0.2-1733479916995 (Datanode Uuid 0eda97cd-fa7c-4eb7-8b05-ca9e586e925b) service to localhost/127.0.0.1:45977 2024-12-06T10:12:10,599 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:12:10,600 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data1/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:10,600 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data2/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:10,601 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:12:10,606 WARN [RS:0;552d6a33fa09:39435.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=4, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:10,606 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C39435%2C1733479918098:(num 1733479918709) roll requested 2024-12-06T10:12:10,606 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C39435%2C1733479918098.1733479930606 2024-12-06T10:12:10,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39435 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:10,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39435 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53838 deadline: 1733479940605, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL 2024-12-06T10:12:10,609 WARN [Thread-704 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1020 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:10,609 WARN [Thread-704 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK], DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]) is bad. 
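Each open WAL file goes through the same recovery dance shown above: createBlockOutputStream gets "Connection refused" from the dead datanode, the streamer marks that node bad, and once no usable replica is left it aborts with "All datanodes [...] are bad", which FSHLog turns into a DamagedWALException and a roll request. A compact sketch of that exclude-and-retry pattern (illustrative only, not the DataStreamer source):

import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.List;

// Try each datanode in the pipeline; a refused connection excludes the node, and
// running out of candidates is reported as "All datanodes ... are bad".
final class PipelineConnectSketch {
  static SocketChannel connectToFirstGood(List<InetSocketAddress> pipeline) throws IOException {
    List<InetSocketAddress> excluded = new ArrayList<>();
    for (InetSocketAddress dn : pipeline) {
      try {
        return SocketChannel.open(dn);   // only succeeds if the datanode is still listening
      } catch (ConnectException e) {
        excluded.add(dn);                // "Excluding datanode ..." in the log
      }
    }
    throw new IOException("All datanodes " + excluded + " are bad. Aborting...");
  }
}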
2024-12-06T10:12:10,610 WARN [Thread-704 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741840_1020 2024-12-06T10:12:10,612 WARN [Thread-704 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK] 2024-12-06T10:12:10,619 WARN [regionserver/552d6a33fa09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL 2024-12-06T10:12:10,619 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 with entries=4, filesize=959 B; new WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479930606 2024-12-06T10:12:10,620 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33565:33565),(127.0.0.1/127.0.0.1:37643:37643)] 2024-12-06T10:12:10,620 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 is not closed yet, will try archiving it next time 2024-12-06T10:12:10,620 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:10,620 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
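Note how the roll still completes even though closing the old writer fails twice above: the trailer write and the close both hit the dead pipeline, are logged as non-fatal, and the file is handed to lease recovery instead. A sketch of that tolerant close, assuming a trivial writer interface (names here are placeholders, not the AbstractProtobufLogWriter API):

import java.io.IOException;

// Best-effort close: a failed trailer write must not block the roll, because the
// old WAL can still be finalized later via lease recovery.
final class TolerantWalCloseSketch {
  interface WalWriter {
    void writeTrailer() throws IOException;
    void close() throws IOException;
  }

  static void closeBestEffort(WalWriter writer) {
    try {
      writer.writeTrailer();
    } catch (IOException e) {
      System.out.println("Failed to write trailer, non-fatal, continuing... " + e);
    }
    try {
      writer.close();
    } catch (IOException e) {
      System.out.println("close old writer failed. " + e);
    }
  }
}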
2024-12-06T10:12:10,621 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-06T10:12:10,621 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-06T10:12:10,621 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 2024-12-06T10:12:10,624 WARN [IPC Server handler 0 on default port 45977 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741833_1009 2024-12-06T10:12:10,626 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 after 5ms 2024-12-06T10:12:14,627 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 after 4006ms 2024-12-06T10:12:22,670 INFO [Time-limited test {}] wal.TestLogRolling(243): log.getCurrentFileName(): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479930606 2024-12-06T10:12:22,671 WARN [ResponseProcessor for block BP-2117338848-172.17.0.2-1733479916995:blk_1073741841_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2117338848-172.17.0.2-1733479916995:blk_1073741841_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:22,672 WARN [DataStreamer for file /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479930606 block BP-2117338848-172.17.0.2-1733479916995:blk_1073741841_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741841_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK], DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]) is bad. 
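The "Initialize RecoverLeaseFSUtils" and "Failed to recover lease, attempt=N" lines show the close-WAL worker polling the NameNode until the un-closed WAL file can be finalized. A minimal sketch of that loop, assuming a DistributedFileSystem handle (recoverLease() is real HDFS client API; the retry timings are taken from the log, the rest is illustrative):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Poll recoverLease() until the NameNode reports the file closed; the first check
// happens almost immediately (attempt=0 after ~5ms above), later ones back off.
final class RecoverLeaseSketch {
  static void recoverLeaseWithRetries(DistributedFileSystem dfs, Path oldWal)
      throws Exception {
    for (int attempt = 0; ; attempt++) {
      if (dfs.recoverLease(oldWal)) {
        return;                               // lease recovered, file is closed
      }
      Thread.sleep(attempt == 0 ? 5L : 4000L); // roughly the spacing seen in the log
    }
  }
}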
2024-12-06T10:12:22,672 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:40422 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741841_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40422 dst: /127.0.0.1:40795 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:22,672 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:59058 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741841_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59058 dst: /127.0.0.1:33213 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:22,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59c9f3f9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:22,674 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@63c7640d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:12:22,674 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:12:22,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f2fc02f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:12:22,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@186aeb49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,STOPPED} 2024-12-06T10:12:22,677 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:12:22,677 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:12:22,677 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:12:22,677 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2117338848-172.17.0.2-1733479916995 (Datanode Uuid 320f8d05-d9a2-4226-ad25-c226aac44fe2) service to localhost/127.0.0.1:45977 2024-12-06T10:12:22,678 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data9/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:22,678 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data10/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:22,678 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:12:22,681 WARN [sync.1 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]] 2024-12-06T10:12:22,681 WARN [sync.1 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]] 2024-12-06T10:12:22,681 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C39435%2C1733479918098:(num 1733479930606) roll requested 2024-12-06T10:12:22,681 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C39435%2C1733479918098.1733479942681 2024-12-06T10:12:22,684 WARN [Thread-713 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
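The "Found 1 replicas but expecting no less than 2 replicas" warnings are the WAL's own replication watchdog: after each sync it compares the surviving pipeline against a minimum and, if the pipeline has shrunk too far, asks the roller for a fresh file on healthier datanodes. A self-contained sketch of that check (field names and message wording modeled on the log; this is not the FSHLog internals):

// On every sync, compare the current pipeline size with the tolerable minimum and
// request a roll at most once per writer, mirroring the sync.N warnings above.
final class LowReplicationCheckSketch {
  private final int minTolerableReplication;
  private volatile boolean rollRequested;

  LowReplicationCheckSketch(int minTolerableReplication) {
    this.minTolerableReplication = minTolerableReplication;
  }

  void onSyncCompleted(int currentPipelineSize) {
    if (currentPipelineSize < minTolerableReplication && !rollRequested) {
      rollRequested = true;   // a real WAL would signal its log-roller thread here
      System.out.println("HDFS pipeline error detected. Found " + currentPipelineSize
          + " replicas but expecting no less than " + minTolerableReplication
          + " replicas. Requesting close of WAL.");
    }
  }
}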
2024-12-06T10:12:22,685 WARN [Thread-713 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK], DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]) is bad. 2024-12-06T10:12:22,685 WARN [Thread-713 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741842_1024 2024-12-06T10:12:22,685 WARN [Thread-713 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK] 2024-12-06T10:12:22,687 WARN [Thread-713 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:22,687 WARN [Thread-713 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK], DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]) is bad. 2024-12-06T10:12:22,687 WARN [Thread-713 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741843_1025 2024-12-06T10:12:22,687 WARN [Thread-713 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK] 2024-12-06T10:12:22,689 WARN [Thread-713 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39641 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
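The last trace above is a different failure mode from plain "Connection refused": the first datanode in the new pipeline accepted the block but could not reach its mirror, so it acked ERROR with firstBadLink set to 127.0.0.1:39641, and the client then excludes that downstream node rather than the one it connected to (visible in the Error Recovery entries that follow). A simplified stand-in for that ack handling (not the DataTransferProtoUtil/DataStreamer code):

// Decide which pipeline node to exclude from an ERROR ack: the named bad link if
// the upstream datanode reported one, otherwise the node the client talked to.
final class FirstBadLinkSketch {
  static int indexOfBadNode(boolean ackIsError, String firstBadLink, String[] pipeline) {
    if (!ackIsError) {
      return -1;                                  // pipeline is healthy
    }
    if (firstBadLink != null && !firstBadLink.isEmpty()) {
      for (int i = 0; i < pipeline.length; i++) {
        if (pipeline[i].equals(firstBadLink)) {
          return i;                               // e.g. datanode 1 (...:39641) is bad
        }
      }
    }
    return 0;                                     // no link named: blame the first node
  }
}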
2024-12-06T10:12:22,689 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53168 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741844_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data8]'}, localName='127.0.0.1:42781', datanodeUuid='51d9ec99-954d-41d9-a2fa-057b2221b6e0', xmitsInProgress=0}:Exception transferring block BP-2117338848-172.17.0.2-1733479916995:blk_1073741844_1026 to mirror 127.0.0.1:39641 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:22,690 WARN [Thread-713 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741844_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]) is bad. 2024-12-06T10:12:22,690 WARN [Thread-713 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741844_1026 2024-12-06T10:12:22,690 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53168 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741844_1026] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-06T10:12:22,690 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53168 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:42781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53168 dst: /127.0.0.1:42781 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:22,690 WARN [Thread-713 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK] 2024-12-06T10:12:22,696 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479930606 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479942681 2024-12-06T10:12:22,696 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37643:37643),(127.0.0.1/127.0.0.1:41325:41325)] 2024-12-06T10:12:22,696 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 is not closed yet, will try archiving it next time 2024-12-06T10:12:22,696 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479930606 is not closed yet, will try archiving it next time 2024-12-06T10:12:22,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741841_1023 (size=2431) 2024-12-06T10:12:23,099 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 is not closed yet, will try archiving it next time 2024-12-06T10:12:25,199 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@11bd76a6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40795, datanodeUuid=83d8a4dc-b5f3-47a2-bd13-ec27cee13f76, infoPort=37643, infoSecurePort=0, ipcPort=41417, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741841_1023 to 127.0.0.1:33213 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:26,684 WARN [ResponseProcessor for block BP-2117338848-172.17.0.2-1733479916995:blk_1073741845_1027 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2117338848-172.17.0.2-1733479916995:blk_1073741845_1027 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:26,685 WARN [DataStreamer for file /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479942681 block BP-2117338848-172.17.0.2-1733479916995:blk_1073741845_1027 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741845_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK], DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]) is bad. 2024-12-06T10:12:26,685 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:34048 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741845_1027] {}] datanode.DataXceiver(331): 127.0.0.1:40795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34048 dst: /127.0.0.1:40795 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:26,685 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53180 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741845_1027] {}] datanode.DataXceiver(331): 127.0.0.1:42781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53180 dst: /127.0.0.1:42781 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:26,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49dbde5b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:26,688 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10289dc3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:12:26,688 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:12:26,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a5b3132{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:12:26,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a8a53d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,STOPPED} 2024-12-06T10:12:26,690 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:12:26,690 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T10:12:26,690 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2117338848-172.17.0.2-1733479916995 (Datanode Uuid 83d8a4dc-b5f3-47a2-bd13-ec27cee13f76) service to localhost/127.0.0.1:45977 2024-12-06T10:12:26,690 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:12:26,691 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data5/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:26,691 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data6/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:26,691 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:12:26,694 WARN [sync.4 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]] 2024-12-06T10:12:26,694 WARN [sync.4 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]] 2024-12-06T10:12:26,694 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C39435%2C1733479918098:(num 1733479942681) roll requested 2024-12-06T10:12:26,694 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C39435%2C1733479918098.1733479946694 2024-12-06T10:12:26,697 WARN [Thread-725 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:26,698 WARN [Thread-725 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]) is bad. 2024-12-06T10:12:26,698 WARN [Thread-725 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741846_1029 2024-12-06T10:12:26,698 WARN [Thread-725 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK] 2024-12-06T10:12:26,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39435 {}] regionserver.HRegion(8581): Flush requested on 3a2059af8836cf786145efe06400336b 2024-12-06T10:12:26,699 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3a2059af8836cf786145efe06400336b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T10:12:26,700 WARN [Thread-725 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:26,700 WARN [Thread-725 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK], DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]) is bad. 2024-12-06T10:12:26,700 WARN [Thread-725 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741847_1030 2024-12-06T10:12:26,701 WARN [Thread-725 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK] 2024-12-06T10:12:26,703 WARN [Thread-725 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:26,703 WARN [Thread-725 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK], DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]) is bad. 2024-12-06T10:12:26,703 WARN [Thread-725 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741848_1031 2024-12-06T10:12:26,704 WARN [Thread-725 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK] 2024-12-06T10:12:26,705 WARN [Thread-725 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:26,705 WARN [Thread-725 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK], DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]) is bad. 2024-12-06T10:12:26,706 WARN [Thread-725 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741849_1032 2024-12-06T10:12:26,706 WARN [Thread-725 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK] 2024-12-06T10:12:26,707 WARN [IPC Server handler 1 on default port 45977 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T10:12:26,707 WARN [IPC Server handler 1 on default port 45977 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T10:12:26,707 WARN [IPC Server handler 1 on default port 45977 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T10:12:26,715 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479942681 with entries=13, filesize=14.10 KB; new WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479946694 2024-12-06T10:12:26,715 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41325:41325)] 2024-12-06T10:12:26,715 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 is not closed yet, 
will try archiving it next time 2024-12-06T10:12:26,715 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479942681 is not closed yet, will try archiving it next time 2024-12-06T10:12:26,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741845_1028 (size=14443) 2024-12-06T10:12:26,718 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 is not closed yet, will try archiving it next time 2024-12-06T10:12:26,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/.tmp/info/d622c318b8ce4458976342b4d28826b0 is 1080, key is row0002/info:/1733479942679/Put/seqid=0 2024-12-06T10:12:26,734 WARN [Thread-726 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:26,734 WARN [Thread-726 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK], DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]) is bad. 2024-12-06T10:12:26,734 WARN [Thread-726 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741851_1034 2024-12-06T10:12:26,735 WARN [Thread-726 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK] 2024-12-06T10:12:26,736 WARN [Thread-726 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:26,737 WARN [Thread-726 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK], DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]) is bad. 2024-12-06T10:12:26,737 WARN [Thread-726 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741852_1035 2024-12-06T10:12:26,737 WARN [Thread-726 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK] 2024-12-06T10:12:26,739 WARN [Thread-726 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33213 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:26,739 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53204 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data8]'}, localName='127.0.0.1:42781', datanodeUuid='51d9ec99-954d-41d9-a2fa-057b2221b6e0', xmitsInProgress=0}:Exception transferring block BP-2117338848-172.17.0.2-1733479916995:blk_1073741853_1036 to mirror 127.0.0.1:33213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:26,740 WARN [Thread-726 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK], DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]) is bad. 2024-12-06T10:12:26,740 WARN [Thread-726 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741853_1036 2024-12-06T10:12:26,740 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53204 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-06T10:12:26,740 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53204 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:42781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53204 dst: /127.0.0.1:42781 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:26,740 WARN [Thread-726 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK] 2024-12-06T10:12:26,741 WARN [Thread-726 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:26,742 WARN [Thread-726 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK], DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]) is bad. 2024-12-06T10:12:26,742 WARN [Thread-726 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741854_1037 2024-12-06T10:12:26,742 WARN [Thread-726 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK] 2024-12-06T10:12:26,743 WARN [IPC Server handler 4 on default port 45977 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T10:12:26,743 WARN [IPC Server handler 4 on default port 45977 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T10:12:26,743 WARN [IPC Server handler 4 on default port 45977 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T10:12:26,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741855_1038 (size=10347) 2024-12-06T10:12:26,914 WARN [sync.2 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]] 2024-12-06T10:12:26,914 WARN [sync.2 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]] 2024-12-06T10:12:26,914 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C39435%2C1733479918098:(num 1733479946694) roll requested 2024-12-06T10:12:26,915 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C39435%2C1733479918098.1733479946914 2024-12-06T10:12:26,918 WARN [Thread-734 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:26,918 WARN [Thread-734 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK], DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]) is bad. 2024-12-06T10:12:26,918 WARN [Thread-734 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741856_1039 2024-12-06T10:12:26,919 WARN [Thread-734 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK] 2024-12-06T10:12:26,920 WARN [Thread-734 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:12:26,920 WARN [Thread-734 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK], DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]) is bad. 2024-12-06T10:12:26,920 WARN [Thread-734 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741857_1040 2024-12-06T10:12:26,921 WARN [Thread-734 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK] 2024-12-06T10:12:26,923 WARN [Thread-734 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37505 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:26,923 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53224 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data8]'}, localName='127.0.0.1:42781', datanodeUuid='51d9ec99-954d-41d9-a2fa-057b2221b6e0', xmitsInProgress=0}:Exception transferring block BP-2117338848-172.17.0.2-1733479916995:blk_1073741858_1041 to mirror 127.0.0.1:37505 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:26,923 WARN [Thread-734 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK], DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]) is bad. 2024-12-06T10:12:26,924 WARN [Thread-734 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741858_1041 2024-12-06T10:12:26,924 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53224 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-06T10:12:26,924 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53224 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:42781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53224 dst: /127.0.0.1:42781 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:26,924 WARN [Thread-734 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK] 2024-12-06T10:12:26,926 WARN [Thread-734 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39641 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:12:26,926 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53238 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data8]'}, localName='127.0.0.1:42781', datanodeUuid='51d9ec99-954d-41d9-a2fa-057b2221b6e0', xmitsInProgress=0}:Exception transferring block BP-2117338848-172.17.0.2-1733479916995:blk_1073741859_1042 to mirror 127.0.0.1:39641 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:26,926 WARN [Thread-734 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK]) is bad. 2024-12-06T10:12:26,926 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53238 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-06T10:12:26,927 WARN [Thread-734 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741859_1042 2024-12-06T10:12:26,927 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:53238 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:42781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53238 dst: /127.0.0.1:42781 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:26,927 WARN [Thread-734 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39641,DS-ab367aac-e619-482e-8f82-86fdb4bfa97c,DISK] 2024-12-06T10:12:26,928 WARN [IPC Server handler 2 on default port 45977 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T10:12:26,928 WARN [IPC Server handler 2 on default port 45977 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T10:12:26,928 WARN [IPC Server handler 2 on default port 45977 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T10:12:26,932 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479946694 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479946914 2024-12-06T10:12:26,932 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41325:41325)] 2024-12-06T10:12:26,932 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 is not closed yet, will try archiving it next time 2024-12-06T10:12:26,932 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479946694 is not closed yet, will try archiving it next time 2024-12-06T10:12:26,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42781 is added to blk_1073741850_1033 (size=1261) 2024-12-06T10:12:27,116 WARN [sync.4 {}] wal.FSHLog(760): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-06T10:12:27,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/.tmp/info/d622c318b8ce4458976342b4d28826b0 2024-12-06T10:12:27,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/.tmp/info/d622c318b8ce4458976342b4d28826b0 as hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/info/d622c318b8ce4458976342b4d28826b0 2024-12-06T10:12:27,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/info/d622c318b8ce4458976342b4d28826b0, entries=5, sequenceid=12, filesize=10.1 K 2024-12-06T10:12:27,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 3a2059af8836cf786145efe06400336b in 465ms, sequenceid=12, compaction requested=false 2024-12-06T10:12:27,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3a2059af8836cf786145efe06400336b: 2024-12-06T10:12:27,327 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:12:27,331 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:12:27,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:12:27,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:12:27,332 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:12:27,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e71e004{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:12:27,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d4b6e41{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:12:27,334 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479918709 is not closed yet, will try archiving it next time 2024-12-06T10:12:27,335 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479930606 to hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/oldWALs/552d6a33fa09%2C39435%2C1733479918098.1733479930606 2024-12-06T10:12:27,368 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7a9ff25[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42781, datanodeUuid=51d9ec99-954d-41d9-a2fa-057b2221b6e0, infoPort=41325, infoSecurePort=0, ipcPort=36361, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741845_1028 to 127.0.0.1:37505 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:27,368 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6ee62ca6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42781, datanodeUuid=51d9ec99-954d-41d9-a2fa-057b2221b6e0, infoPort=41325, infoSecurePort=0, ipcPort=36361, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741855_1038 to 127.0.0.1:33213 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:27,450 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d00cc18{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/java.io.tmpdir/jetty-localhost-44587-hadoop-hdfs-3_4_1-tests_jar-_-any-4609719772882589410/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:27,450 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@12905360{HTTP/1.1, (http/1.1)}{localhost:44587} 2024-12-06T10:12:27,450 INFO [Time-limited test {}] server.Server(415): Started @148742ms 2024-12-06T10:12:27,452 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:12:27,552 WARN [Thread-758 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T10:12:27,561 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48d09dbd537b8b24 with lease ID 0xb696fa3a183d7325: from storage DS-ab367aac-e619-482e-8f82-86fdb4bfa97c node DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T10:12:27,561 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48d09dbd537b8b24 with lease ID 0xb696fa3a183d7325: from storage DS-aec44971-df45-419f-8221-dffb6a780dfe node DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:12:27,985 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T10:12:28,367 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6ee62ca6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42781, datanodeUuid=51d9ec99-954d-41d9-a2fa-057b2221b6e0, infoPort=41325, infoSecurePort=0, ipcPort=36361, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741850_1033 to 127.0.0.1:33213 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:28,474 WARN [master/552d6a33fa09:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=96, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:12:28,474 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C45929%2C1733479918022:(num 1733479918366) roll requested 2024-12-06T10:12:28,475 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:28,475 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45929%2C1733479918022.1733479948475 2024-12-06T10:12:28,475 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:28,478 WARN [Thread-778 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:28,479 WARN [Thread-778 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK], DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]) is bad. 2024-12-06T10:12:28,479 WARN [Thread-778 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741861_1044 2024-12-06T10:12:28,479 WARN [Thread-778 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK] 2024-12-06T10:12:28,481 WARN [Thread-778 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:28,481 WARN [Thread-778 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK], DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]) is bad. 2024-12-06T10:12:28,481 WARN [Thread-778 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741862_1045 2024-12-06T10:12:28,482 WARN [Thread-778 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK] 2024-12-06T10:12:28,484 WARN [Thread-778 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40795 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:28,485 WARN [Thread-778 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK], DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]) is bad. 
2024-12-06T10:12:28,484 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_846066635_22 at /127.0.0.1:53264 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data8]'}, localName='127.0.0.1:42781', datanodeUuid='51d9ec99-954d-41d9-a2fa-057b2221b6e0', xmitsInProgress=0}:Exception transferring block BP-2117338848-172.17.0.2-1733479916995:blk_1073741863_1046 to mirror 127.0.0.1:40795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:28,485 WARN [Thread-778 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741863_1046 2024-12-06T10:12:28,485 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_846066635_22 at /127.0.0.1:53264 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-06T10:12:28,485 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_846066635_22 at /127.0.0.1:53264 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:42781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53264 dst: /127.0.0.1:42781 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:28,485 WARN [Thread-778 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK] 2024-12-06T10:12:28,490 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL 2024-12-06T10:12:28,491 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022/552d6a33fa09%2C45929%2C1733479918022.1733479918366 with entries=93, filesize=46.04 KB; new WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022/552d6a33fa09%2C45929%2C1733479918022.1733479948475 2024-12-06T10:12:28,491 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42831:42831),(127.0.0.1/127.0.0.1:41325:41325)] 2024-12-06T10:12:28,491 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022/552d6a33fa09%2C45929%2C1733479918022.1733479918366 is not closed yet, will try archiving it next time 2024-12-06T10:12:28,491 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:28,492 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:12:28,492 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022/552d6a33fa09%2C45929%2C1733479918022.1733479918366 2024-12-06T10:12:28,492 WARN [IPC Server handler 1 on default port 45977 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022/552d6a33fa09%2C45929%2C1733479918022.1733479918366 has not been closed. Lease recovery is in progress. RecoveryId = 1048 for block blk_1073741830_1006 2024-12-06T10:12:28,492 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022/552d6a33fa09%2C45929%2C1733479918022.1733479918366 after 0ms 2024-12-06T10:12:30,526 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:12:30,527 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59470, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:12:31,799 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:12:31,801 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:12:32,494 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022/552d6a33fa09%2C45929%2C1733479918022.1733479918366 after 4002ms 2024-12-06T10:12:33,557 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6890627f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741838_1014 to 127.0.0.1:33213 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:33,557 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7e15d69a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741836_1012 to 127.0.0.1:40795 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:34,557 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7e15d69a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741832_1008 to 127.0.0.1:33213 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:34,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:12:36,557 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6890627f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741829_1005 to 127.0.0.1:33213 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:36,557 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7e15d69a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741827_1003 to 127.0.0.1:33213 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:37,556 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7e15d69a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741825_1001 to 127.0.0.1:40795 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:37,573 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5903dfb9 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2117338848-172.17.0.2-1733479916995:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:37505,null,null]) java.net.ConnectException: Call From 552d6a33fa09/172.17.0.2 to localhost:46695 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-06T10:12:37,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44071 is added to blk_1073741833_1022 (size=959) 2024-12-06T10:12:39,557 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7e15d69a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741826_1002 to 127.0.0.1:33213 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:39,557 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6890627f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741837_1013 to 127.0.0.1:40795 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:39,952 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T10:12:39,952 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-06T10:12:40,557 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7e15d69a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741831_1007 to 127.0.0.1:40795 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:40,557 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6890627f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44071, datanodeUuid=0f76445e-e290-4916-b442-643b7633d531, infoPort=42831, infoSecurePort=0, ipcPort=42475, storageInfo=lv=-57;cid=testClusterID;nsid=1562383693;c=1733479916995):Failed to transfer BP-2117338848-172.17.0.2-1733479916995:blk_1073741835_1011 to 127.0.0.1:40795 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:42,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741833_1022 (size=959) 2024-12-06T10:12:45,273 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 3a2059af8836cf786145efe06400336b, had cached 0 bytes from a total of 10347 2024-12-06T10:12:46,230 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C39435%2C1733479918098.1733479966230 2024-12-06T10:12:46,234 WARN [Thread-792 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33213 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:46,234 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_846066635_22 at /127.0.0.1:42754 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741865_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data8]'}, localName='127.0.0.1:42781', datanodeUuid='51d9ec99-954d-41d9-a2fa-057b2221b6e0', xmitsInProgress=0}:Exception transferring block BP-2117338848-172.17.0.2-1733479916995:blk_1073741865_1049 to mirror 127.0.0.1:33213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:46,234 WARN [Thread-792 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741865_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK], DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]) is bad. 2024-12-06T10:12:46,235 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_846066635_22 at /127.0.0.1:42754 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741865_1049] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-06T10:12:46,235 WARN [Thread-792 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741865_1049 2024-12-06T10:12:46,235 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_846066635_22 at /127.0.0.1:42754 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741865_1049] {}] datanode.DataXceiver(331): 127.0.0.1:42781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42754 dst: /127.0.0.1:42781 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:12:46,235 WARN [Thread-792 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK] 2024-12-06T10:12:46,240 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479946914 with entries=2, filesize=1.57 KB; new WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479966230 2024-12-06T10:12:46,240 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41325:41325),(127.0.0.1/127.0.0.1:42831:42831)] 2024-12-06T10:12:46,240 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479946914 is not closed yet, will try archiving it next time 2024-12-06T10:12:46,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741860_1043 (size=1618) 2024-12-06T10:12:46,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39435 {}] regionserver.HRegion(8581): Flush requested on 3a2059af8836cf786145efe06400336b 2024-12-06T10:12:46,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3a2059af8836cf786145efe06400336b 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-06T10:12:46,244 INFO [sync.3 {}] wal.FSHLog(777): LowReplication-Roller was enabled. 2024-12-06T10:12:46,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/.tmp/info/e53eeb8e3e07400c9e9b101e7a239bd6 is 1080, key is row0007/info:/1733479946700/Put/seqid=0 2024-12-06T10:12:46,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:42772 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741867_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data8]'}, localName='127.0.0.1:42781', datanodeUuid='51d9ec99-954d-41d9-a2fa-057b2221b6e0', xmitsInProgress=0}:Exception transferring block BP-2117338848-172.17.0.2-1733479916995:blk_1073741867_1051 to mirror 127.0.0.1:40795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:46,251 WARN [Thread-799 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40795 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:46,251 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:42772 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741867_1051] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-06T10:12:46,251 WARN [Thread-799 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741867_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK], DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]) is bad. 2024-12-06T10:12:46,251 WARN [Thread-799 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741867_1051 2024-12-06T10:12:46,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:42772 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741867_1051] {}] datanode.DataXceiver(331): 127.0.0.1:42781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42772 dst: /127.0.0.1:42781 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:46,252 WARN [Thread-799 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK] 2024-12-06T10:12:46,257 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T10:12:46,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741868_1052 (size=13583) 2024-12-06T10:12:46,257 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T10:12:46,257 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62cd5620 to 127.0.0.1:52351 2024-12-06T10:12:46,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:12:46,257 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T10:12:46,257 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1949170568, stopped=false 2024-12-06T10:12:46,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44071 is added to blk_1073741868_1052 (size=13583) 2024-12-06T10:12:46,257 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=552d6a33fa09,45929,1733479918022 2024-12-06T10:12:46,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=25 (bloomFilter=true), to=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/.tmp/info/e53eeb8e3e07400c9e9b101e7a239bd6 2024-12-06T10:12:46,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41575-0x10066d0228b0003, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:12:46,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:12:46,260 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T10:12:46,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41575-0x10066d0228b0003, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:46,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:46,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:12:46,260 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:12:46,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:46,260 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,39435,1733479918098' ***** 2024-12-06T10:12:46,260 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T10:12:46,260 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,41575,1733479919747' ***** 2024-12-06T10:12:46,260 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T10:12:46,261 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T10:12:46,261 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T10:12:46,261 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T10:12:46,261 INFO [RS:1;552d6a33fa09:41575 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T10:12:46,261 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:12:46,261 INFO [RS:1;552d6a33fa09:41575 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T10:12:46,261 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,41575,1733479919747 2024-12-06T10:12:46,261 DEBUG [RS:1;552d6a33fa09:41575 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:12:46,261 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,41575,1733479919747; all regions closed. 2024-12-06T10:12:46,261 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:12:46,261 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41575-0x10066d0228b0003, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:12:46,262 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,41575,1733479919747 2024-12-06T10:12:46,262 WARN [WAL-Shutdown-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:12:46,263 ERROR [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(1664): Shutdown / close of WAL failed: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... 2024-12-06T10:12:46,263 DEBUG [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(1665): Shutdown / close exception details: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:46,263 DEBUG [RS:1;552d6a33fa09:41575 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:12:46,263 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:12:46,263 INFO [RS:1;552d6a33fa09:41575 {}] hbase.ChoreService(370): Chore service for: regionserver/552d6a33fa09:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T10:12:46,264 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T10:12:46,264 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T10:12:46,264 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T10:12:46,264 INFO [RS:1;552d6a33fa09:41575 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41575 2024-12-06T10:12:46,265 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T10:12:46,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41575-0x10066d0228b0003, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/552d6a33fa09,41575,1733479919747 2024-12-06T10:12:46,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:12:46,267 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [552d6a33fa09,41575,1733479919747] 2024-12-06T10:12:46,267 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 552d6a33fa09,41575,1733479919747; numProcessing=1 2024-12-06T10:12:46,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/.tmp/info/e53eeb8e3e07400c9e9b101e7a239bd6 as hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/info/e53eeb8e3e07400c9e9b101e7a239bd6 2024-12-06T10:12:46,269 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/552d6a33fa09,41575,1733479919747 already deleted, retry=false 2024-12-06T10:12:46,269 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 552d6a33fa09,41575,1733479919747 expired; onlineServers=1 2024-12-06T10:12:46,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/info/e53eeb8e3e07400c9e9b101e7a239bd6, entries=8, sequenceid=25, filesize=13.3 K 2024-12-06T10:12:46,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~10.50 KB/10757, heapSize ~11.48 KB/11760, currentSize=9.46 KB/9684 for 3a2059af8836cf786145efe06400336b in 33ms, sequenceid=25, compaction requested=false 2024-12-06T10:12:46,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3a2059af8836cf786145efe06400336b: 2024-12-06T10:12:46,275 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=23.4 K, sizeToCheck=16.0 K 2024-12-06T10:12:46,275 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T10:12:46,275 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/info/e53eeb8e3e07400c9e9b101e7a239bd6 because midkey is the same as first or last row 2024-12-06T10:12:46,276 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T10:12:46,276 INFO [RS:0;552d6a33fa09:39435 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-06T10:12:46,276 INFO [RS:0;552d6a33fa09:39435 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T10:12:46,276 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(3579): Received CLOSE for 3a2059af8836cf786145efe06400336b 2024-12-06T10:12:46,276 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(3579): Received CLOSE for 2d62f68be62f6454a46e6823da00beaf 2024-12-06T10:12:46,276 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,39435,1733479918098 2024-12-06T10:12:46,276 DEBUG [RS:0;552d6a33fa09:39435 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:12:46,276 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T10:12:46,276 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T10:12:46,276 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T10:12:46,276 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T10:12:46,276 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3a2059af8836cf786145efe06400336b, disabling compactions & flushes 2024-12-06T10:12:46,276 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 2024-12-06T10:12:46,276 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 2024-12-06T10:12:46,276 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-06T10:12:46,276 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. after waiting 0 ms 2024-12-06T10:12:46,277 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 
2024-12-06T10:12:46,277 DEBUG [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1603): Online Regions={3a2059af8836cf786145efe06400336b=TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b., 1588230740=hbase:meta,,1.1588230740, 2d62f68be62f6454a46e6823da00beaf=hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf.} 2024-12-06T10:12:46,277 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 3a2059af8836cf786145efe06400336b 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-06T10:12:46,277 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:12:46,277 DEBUG [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2d62f68be62f6454a46e6823da00beaf, 3a2059af8836cf786145efe06400336b 2024-12-06T10:12:46,277 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:12:46,277 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:12:46,277 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:12:46,277 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:12:46,277 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.87 KB heapSize=5.40 KB 2024-12-06T10:12:46,277 WARN [RS_OPEN_META-regionserver/552d6a33fa09:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:12:46,278 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C39435%2C1733479918098.meta:.meta(num 1733479919089) roll requested 2024-12-06T10:12:46,278 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:12:46,278 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C39435%2C1733479918098.meta.1733479966278.meta 2024-12-06T10:12:46,278 ERROR [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2808): ***** ABORTING region server 552d6a33fa09,39435,1733479918098: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:46,278 ERROR [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-06T10:12:46,281 WARN [Thread-807 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:12:46,281 WARN [Thread-807 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741869_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK], DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]) is bad. 2024-12-06T10:12:46,281 WARN [Thread-807 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741869_1053 2024-12-06T10:12:46,281 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-06T10:12:46,282 WARN [Thread-807 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK] 2024-12-06T10:12:46,283 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-06T10:12:46,283 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-06T10:12:46,283 WARN [Thread-807 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:46,283 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-06T10:12:46,283 WARN [Thread-807 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741870_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK], DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]) is bad. 
2024-12-06T10:12:46,283 WARN [Thread-807 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741870_1054 2024-12-06T10:12:46,283 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 265989120 }, "NonHeapMemoryUsage": { "committed": 162070528, "init": 7667712, "max": -1, "used": 160213240 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-12-06T10:12:46,284 WARN [Thread-807 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK] 2024-12-06T10:12:46,284 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/.tmp/info/4ad7cc6ada31409282b2756a2f456e4d is 1080, key is row0014/info:/1733479966243/Put/seqid=0 2024-12-06T10:12:46,286 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45929 {}] master.MasterRpcServices(626): 552d6a33fa09,39435,1733479918098 reported a fatal error: ***** ABORTING region server 552d6a33fa09,39435,1733479918098: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** Cause: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) 2024-12-06T10:12:46,288 WARN [Thread-808 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33213 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:46,288 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:42810 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741872_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data8]'}, localName='127.0.0.1:42781', datanodeUuid='51d9ec99-954d-41d9-a2fa-057b2221b6e0', xmitsInProgress=0}:Exception transferring block BP-2117338848-172.17.0.2-1733479916995:blk_1073741872_1056 to mirror 127.0.0.1:33213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:46,288 WARN [Thread-808 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741872_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK], DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK]) is bad. 2024-12-06T10:12:46,288 WARN [Thread-808 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741872_1056 2024-12-06T10:12:46,288 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:42810 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741872_1056] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-06T10:12:46,288 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1151365070_22 at /127.0.0.1:42810 [Receiving block BP-2117338848-172.17.0.2-1733479916995:blk_1073741872_1056] {}] datanode.DataXceiver(331): 127.0.0.1:42781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42810 dst: /127.0.0.1:42781 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:12:46,288 WARN [Thread-808 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33213,DS-68e6ee15-a326-43d2-a9c7-b4ce3b675b75,DISK] 2024-12-06T10:12:46,289 WARN [Thread-808 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:46,290 WARN [Thread-808 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2117338848-172.17.0.2-1733479916995:blk_1073741873_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK], DatanodeInfoWithStorage[127.0.0.1:42781,DS-169570d1-0098-4e00-88ba-a3e63ca92f68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK]) is bad. 
2024-12-06T10:12:46,290 WARN [Thread-808 {}] hdfs.DataStreamer(1850): Abandoning BP-2117338848-172.17.0.2-1733479916995:blk_1073741873_1057 2024-12-06T10:12:46,290 WARN [Thread-808 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40795,DS-9cc1635f-50ff-40fe-8a15-2ff41f0ce07d,DISK] 2024-12-06T10:12:46,299 WARN [regionserver/552d6a33fa09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-06T10:12:46,299 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta with entries=11, filesize=3.63 KB; new WAL /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479966278.meta 2024-12-06T10:12:46,299 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41325:41325),(127.0.0.1/127.0.0.1:42831:42831)] 2024-12-06T10:12:46,299 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta is not closed yet, will try archiving it next time 2024-12-06T10:12:46,299 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:12:46,300 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37505,DS-012471da-beee-4a04-a715-f05c9493cd54,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
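Note that the roll itself still succeeds: a new writer is opened on a fresh two-node pipeline while the old .meta file stays open, which is why the trailer write and the close of the old writer fail non-fatally and lease recovery is started next. Rolls can also be requested explicitly through the public client API; the sketch below assumes an HBase 2.x client on the classpath and a reachable cluster, and it is not how the roll in this log was triggered (here the log roller did it internally after the append failure).

    // Sketch of an explicit WAL roll through the public client API; assumes an HBase 2.x
    // client and a reachable cluster. The roll recorded above was triggered internally by
    // the log roller, not by an admin call like this.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
            admin.rollWALWriter(sn);   // close the server's current WAL and open a new one
          }
        }
      }
    }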
2024-12-06T10:12:46,300 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta 2024-12-06T10:12:46,300 WARN [IPC Server handler 2 on default port 45977 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta has not been closed. Lease recovery is in progress. RecoveryId = 1059 for block blk_1073741834_1010 2024-12-06T10:12:46,300 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta after 0ms 2024-12-06T10:12:46,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741874_1058 (size=14663) 2024-12-06T10:12:46,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44071 is added to blk_1073741874_1058 (size=14663) 2024-12-06T10:12:46,304 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/.tmp/info/4ad7cc6ada31409282b2756a2f456e4d 2024-12-06T10:12:46,311 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/.tmp/info/4ad7cc6ada31409282b2756a2f456e4d as hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/info/4ad7cc6ada31409282b2756a2f456e4d 2024-12-06T10:12:46,317 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/info/4ad7cc6ada31409282b2756a2f456e4d, entries=9, sequenceid=37, filesize=14.3 K 2024-12-06T10:12:46,318 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 3a2059af8836cf786145efe06400336b in 41ms, sequenceid=37, compaction requested=true 2024-12-06T10:12:46,322 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/data/default/TestLogRolling-testLogRollOnDatanodeDeath/3a2059af8836cf786145efe06400336b/recovered.edits/40.seqid, newMaxSeqId=40, maxSeqId=1 2024-12-06T10:12:46,323 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 
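Because the old writer cannot be closed, the Close-WAL-Writer thread falls back to asking the namenode to recover the file's lease and then polls until the block is finalized, which is what the attempt=0 message here and the attempt=1 retry further down record. RecoverLeaseFSUtils wraps the plain HDFS calls; the sketch below shows roughly the same loop with DistributedFileSystem.recoverLease and isFileClosed, using an illustrative 4-second poll interval and attempt cap rather than HBase's exact retry policy.

    // Roughly the loop RecoverLeaseFSUtils performs; poll interval and attempt cap are
    // illustrative choices, not HBase's exact retry policy. Pass the WAL path as args[0].
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverLeaseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new URI("hdfs://localhost:45977"), conf);
        Path wal = new Path(args[0]);                  // e.g. the .meta WAL named in the log
        boolean done = dfs.recoverLease(wal);          // ask the namenode to start lease recovery
        for (int attempt = 1; !done && attempt <= 10; attempt++) {
          Thread.sleep(4000L);                         // recovery is asynchronous, so poll
          done = dfs.isFileClosed(wal);                // true once the last block is finalized
        }
        System.out.println("lease recovered: " + done);
      }
    }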
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 2024-12-06T10:12:46,323 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3a2059af8836cf786145efe06400336b: 2024-12-06T10:12:46,323 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733479919891.3a2059af8836cf786145efe06400336b. 2024-12-06T10:12:46,323 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 2d62f68be62f6454a46e6823da00beaf, disabling compactions & flushes 2024-12-06T10:12:46,323 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:12:46,323 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:12:46,323 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. after waiting 0 ms 2024-12-06T10:12:46,323 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:12:46,324 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 2d62f68be62f6454a46e6823da00beaf: 2024-12-06T10:12:46,324 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:12:46,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41575-0x10066d0228b0003, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:12:46,368 INFO [RS:1;552d6a33fa09:41575 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,41575,1733479919747; zookeeper connection closed. 
2024-12-06T10:12:46,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41575-0x10066d0228b0003, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:12:46,368 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@441160b6 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@441160b6 2024-12-06T10:12:46,477 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T10:12:46,477 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(3579): Received CLOSE for 2d62f68be62f6454a46e6823da00beaf 2024-12-06T10:12:46,477 DEBUG [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2d62f68be62f6454a46e6823da00beaf 2024-12-06T10:12:46,477 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 2d62f68be62f6454a46e6823da00beaf, disabling compactions & flushes 2024-12-06T10:12:46,477 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:12:46,477 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:12:46,477 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:12:46,477 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:12:46,478 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:12:46,478 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. after waiting 0 ms 2024-12-06T10:12:46,478 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:12:46,478 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:12:46,478 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:12:46,478 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 2d62f68be62f6454a46e6823da00beaf: 2024-12-06T10:12:46,478 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:12:46,478 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. 
Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733479919153.2d62f68be62f6454a46e6823da00beaf. 2024-12-06T10:12:46,478 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:meta,,1.1588230740 2024-12-06T10:12:46,566 INFO [regionserver/552d6a33fa09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:12:46,604 INFO [regionserver/552d6a33fa09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T10:12:46,604 INFO [regionserver/552d6a33fa09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T10:12:46,643 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479942681 to hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/oldWALs/552d6a33fa09%2C39435%2C1733479918098.1733479942681 2024-12-06T10:12:46,644 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479946694 to hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/oldWALs/552d6a33fa09%2C39435%2C1733479918098.1733479946694 2024-12-06T10:12:46,645 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.1733479946914 to hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/oldWALs/552d6a33fa09%2C39435%2C1733479918098.1733479946914 2024-12-06T10:12:46,677 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing 2024-12-06T10:12:46,677 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,39435,1733479918098; all regions closed. 2024-12-06T10:12:46,678 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098 2024-12-06T10:12:46,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44071 is added to blk_1073741871_1055 (size=93) 2024-12-06T10:12:46,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741871_1055 (size=93) 2024-12-06T10:12:47,578 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@66c35f5 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2117338848-172.17.0.2-1733479916995:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:37505,null,null]) java.net.ConnectException: Call From 552d6a33fa09/172.17.0.2 to localhost:46695 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-06T10:12:47,837 INFO [regionserver/552d6a33fa09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:12:48,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44071 is added to blk_1073741860_1043 (size=1618) 2024-12-06T10:12:48,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741830_1048 (size=47148) 2024-12-06T10:12:50,301 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta after 4001ms 2024-12-06T10:12:51,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,681 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-06T10:12:51,681 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098 2024-12-06T10:12:51,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44071 is added to blk_1073741866_1050 (size=13514) 2024-12-06T10:12:51,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741866_1050 (size=13514) 2024-12-06T10:12:51,685 DEBUG [RS:0;552d6a33fa09:39435 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:12:51,685 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:12:51,685 INFO [RS:0;552d6a33fa09:39435 {}] hbase.ChoreService(370): Chore service for: regionserver/552d6a33fa09:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T10:12:51,686 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
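The five-second wait reported by the WAL-Shutdown error above is governed by the property named in the message itself, hbase.wal.fshlog.wait.on.shutdown.seconds, and can be raised when the underlying filesystem is known to be slow to release writers during shutdown. A small sketch of setting it programmatically follows; the key is copied verbatim from the log, the 30-second value is only an example, and the same key can equally be set in hbase-site.xml.

    // The property key is copied verbatim from the message above; the 30-second value is
    // only an example. The same key can be set in hbase-site.xml instead.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);  // default appears to be 5, per the log
        System.out.println(conf.get("hbase.wal.fshlog.wait.on.shutdown.seconds"));
      }
    }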
2024-12-06T10:12:51,686 INFO [RS:0;552d6a33fa09:39435 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:39435 2024-12-06T10:12:51,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/552d6a33fa09,39435,1733479918098 2024-12-06T10:12:51,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:12:51,689 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [552d6a33fa09,39435,1733479918098] 2024-12-06T10:12:51,689 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 552d6a33fa09,39435,1733479918098; numProcessing=2 2024-12-06T10:12:51,691 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/552d6a33fa09,39435,1733479918098 already deleted, retry=false 2024-12-06T10:12:51,691 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 552d6a33fa09,39435,1733479918098 expired; onlineServers=0 2024-12-06T10:12:51,691 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,45929,1733479918022' ***** 2024-12-06T10:12:51,691 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T10:12:51,691 DEBUG [M:0;552d6a33fa09:45929 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@563746fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:12:51,691 INFO [M:0;552d6a33fa09:45929 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,45929,1733479918022 2024-12-06T10:12:51,691 INFO [M:0;552d6a33fa09:45929 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,45929,1733479918022; all regions closed. 2024-12-06T10:12:51,691 DEBUG [M:0;552d6a33fa09:45929 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:12:51,692 DEBUG [M:0;552d6a33fa09:45929 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T10:12:51,692 DEBUG [M:0;552d6a33fa09:45929 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T10:12:51,692 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T10:12:51,692 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733479918475 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733479918475,5,FailOnTimeoutGroup] 2024-12-06T10:12:51,692 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733479918475 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733479918475,5,FailOnTimeoutGroup] 2024-12-06T10:12:51,692 INFO [M:0;552d6a33fa09:45929 {}] hbase.ChoreService(370): Chore service for: master/552d6a33fa09:0 had [] on shutdown 2024-12-06T10:12:51,692 DEBUG [M:0;552d6a33fa09:45929 {}] master.HMaster(1733): Stopping service threads 2024-12-06T10:12:51,692 INFO [M:0;552d6a33fa09:45929 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T10:12:51,692 INFO [M:0;552d6a33fa09:45929 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T10:12:51,693 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T10:12:51,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T10:12:51,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:51,693 DEBUG [M:0;552d6a33fa09:45929 {}] zookeeper.ZKUtil(347): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T10:12:51,693 WARN [M:0;552d6a33fa09:45929 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T10:12:51,693 INFO [M:0;552d6a33fa09:45929 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T10:12:51,693 INFO [M:0;552d6a33fa09:45929 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T10:12:51,693 DEBUG [M:0;552d6a33fa09:45929 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:12:51,694 INFO [M:0;552d6a33fa09:45929 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:12:51,694 DEBUG [M:0;552d6a33fa09:45929 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:12:51,694 DEBUG [M:0;552d6a33fa09:45929 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:12:51,694 DEBUG [M:0;552d6a33fa09:45929 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T10:12:51,694 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:12:51,694 INFO [M:0;552d6a33fa09:45929 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.08 KB heapSize=49.29 KB 2024-12-06T10:12:51,718 DEBUG [M:0;552d6a33fa09:45929 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9808b688cddf427d9e79a7d8e815fc92 is 82, key is hbase:meta,,1/info:regioninfo/1733479919123/Put/seqid=0 2024-12-06T10:12:51,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741875_1060 (size=5672) 2024-12-06T10:12:51,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44071 is added to blk_1073741875_1060 (size=5672) 2024-12-06T10:12:51,724 INFO [M:0;552d6a33fa09:45929 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9808b688cddf427d9e79a7d8e815fc92 2024-12-06T10:12:51,753 DEBUG [M:0;552d6a33fa09:45929 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1d06f0d022d64dd281854c7323d8653b is 774, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733479920304/Put/seqid=0 2024-12-06T10:12:51,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741876_1061 (size=7465) 2024-12-06T10:12:51,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44071 is added to blk_1073741876_1061 (size=7465) 2024-12-06T10:12:51,759 INFO [M:0;552d6a33fa09:45929 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.41 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1d06f0d022d64dd281854c7323d8653b 2024-12-06T10:12:51,782 DEBUG [M:0;552d6a33fa09:45929 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a85b8c65e3d0418da879c74218e808c4 is 69, key is 552d6a33fa09,39435,1733479918098/rs:state/1733479918543/Put/seqid=0 2024-12-06T10:12:51,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44071 is added to blk_1073741877_1062 (size=5224) 2024-12-06T10:12:51,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741877_1062 (size=5224) 2024-12-06T10:12:51,788 INFO [M:0;552d6a33fa09:45929 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=97 (bloomFilter=true), 
to=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a85b8c65e3d0418da879c74218e808c4 2024-12-06T10:12:51,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:12:51,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39435-0x10066d0228b0001, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:12:51,790 INFO [RS:0;552d6a33fa09:39435 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,39435,1733479918098; zookeeper connection closed. 2024-12-06T10:12:51,790 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@f1a00eb {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@f1a00eb 2024-12-06T10:12:51,790 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-06T10:12:51,809 DEBUG [M:0;552d6a33fa09:45929 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e1d1d19a7f7d4850a39f5ea9e7dd7f79 is 52, key is load_balancer_on/state:d/1733479919727/Put/seqid=0 2024-12-06T10:12:51,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44071 is added to blk_1073741878_1063 (size=5056) 2024-12-06T10:12:51,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741878_1063 (size=5056) 2024-12-06T10:12:51,815 INFO [M:0;552d6a33fa09:45929 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e1d1d19a7f7d4850a39f5ea9e7dd7f79 2024-12-06T10:12:51,821 DEBUG [M:0;552d6a33fa09:45929 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9808b688cddf427d9e79a7d8e815fc92 as hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9808b688cddf427d9e79a7d8e815fc92 2024-12-06T10:12:51,826 INFO [M:0;552d6a33fa09:45929 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9808b688cddf427d9e79a7d8e815fc92, entries=8, sequenceid=97, filesize=5.5 K 2024-12-06T10:12:51,827 DEBUG [M:0;552d6a33fa09:45929 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1d06f0d022d64dd281854c7323d8653b as hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1d06f0d022d64dd281854c7323d8653b 
2024-12-06T10:12:51,833 INFO [M:0;552d6a33fa09:45929 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1d06f0d022d64dd281854c7323d8653b, entries=11, sequenceid=97, filesize=7.3 K 2024-12-06T10:12:51,834 DEBUG [M:0;552d6a33fa09:45929 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a85b8c65e3d0418da879c74218e808c4 as hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a85b8c65e3d0418da879c74218e808c4 2024-12-06T10:12:51,840 INFO [M:0;552d6a33fa09:45929 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a85b8c65e3d0418da879c74218e808c4, entries=2, sequenceid=97, filesize=5.1 K 2024-12-06T10:12:51,841 DEBUG [M:0;552d6a33fa09:45929 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e1d1d19a7f7d4850a39f5ea9e7dd7f79 as hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e1d1d19a7f7d4850a39f5ea9e7dd7f79 2024-12-06T10:12:51,846 INFO [M:0;552d6a33fa09:45929 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e1d1d19a7f7d4850a39f5ea9e7dd7f79, entries=1, sequenceid=97, filesize=4.9 K 2024-12-06T10:12:51,847 INFO [M:0;552d6a33fa09:45929 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.08 KB/41039, heapSize ~49.23 KB/50408, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=97, compaction requested=false 2024-12-06T10:12:51,849 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T10:12:51,849 INFO [M:0;552d6a33fa09:45929 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:12:51,849 DEBUG [M:0;552d6a33fa09:45929 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:12:51,849 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/MasterData/WALs/552d6a33fa09,45929,1733479918022 2024-12-06T10:12:51,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42781 is added to blk_1073741864_1047 (size=757) 2024-12-06T10:12:51,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44071 is added to blk_1073741864_1047 (size=757) 2024-12-06T10:12:51,852 INFO [M:0;552d6a33fa09:45929 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-06T10:12:51,852 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-06T10:12:51,852 INFO [M:0;552d6a33fa09:45929 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45929 2024-12-06T10:12:51,854 DEBUG [M:0;552d6a33fa09:45929 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/552d6a33fa09,45929,1733479918022 already deleted, retry=false 2024-12-06T10:12:51,866 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,866 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:51,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:12:51,956 INFO [M:0;552d6a33fa09:45929 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,45929,1733479918022; zookeeper connection closed. 
2024-12-06T10:12:51,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45929-0x10066d0228b0000, quorum=127.0.0.1:52351, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:12:51,959 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d00cc18{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:51,960 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@12905360{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:12:51,960 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:12:51,960 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d4b6e41{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:12:51,960 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e71e004{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,STOPPED} 2024-12-06T10:12:51,962 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T10:12:51,962 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:12:51,962 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:12:51,961 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@392c20f6 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2117338848-172.17.0.2-1733479916995:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:37505,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:46695 , LocalHost:localPort 552d6a33fa09/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-06T10:12:51,962 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2117338848-172.17.0.2-1733479916995 (Datanode Uuid 0f76445e-e290-4916-b442-643b7633d531) service to localhost/127.0.0.1:45977 2024-12-06T10:12:51,963 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data3/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:51,963 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data4/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:51,963 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@392c20f6 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-2117338848-172.17.0.2-1733479916995:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:37505,null,null], DatanodeInfoWithStorage[127.0.0.1:44071,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-2117338848-172.17.0.2-1733479916995 2024-12-06T10:12:51,964 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:12:51,966 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67e0ea44{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:51,966 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64f4bc9a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:12:51,966 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:12:51,966 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@493e02d8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:12:51,967 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b6caab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,STOPPED} 2024-12-06T10:12:51,968 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:12:51,968 WARN [BP-2117338848-172.17.0.2-1733479916995 heartbeating to localhost/127.0.0.1:45977 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2117338848-172.17.0.2-1733479916995 (Datanode Uuid 51d9ec99-954d-41d9-a2fa-057b2221b6e0) service to localhost/127.0.0.1:45977 2024-12-06T10:12:51,968 ERROR [Command processor {}] 
datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T10:12:51,968 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:12:51,968 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data7/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:51,969 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/cluster_8b6b75c6-f825-0be0-e52d-680c3cb2bbee/dfs/data/data8/current/BP-2117338848-172.17.0.2-1733479916995 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:12:51,969 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:12:51,975 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@69d3d453{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:12:51,976 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@52ca9eab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:12:51,976 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:12:51,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1101772a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:12:51,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39523ff9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir/,STOPPED} 2024-12-06T10:12:51,984 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T10:12:52,012 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-06T10:12:52,019 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=86 (was 62) Potentially hanging thread: RS-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-12-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-13-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:45977 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:45977 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:45977 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$793/0x00007fdfa8b83100.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-13-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:45977 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RS-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45977 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:45977 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-12-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-12-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-13-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=426 (was 403) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=222 (was 296), ProcessCount=11 (was 11), AvailableMemoryMB=6774 (was 7628) 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=86, OpenFileDescriptor=426, MaxFileDescriptor=1048576, SystemLoadAverage=222, ProcessCount=11, AvailableMemoryMB=6774 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.log.dir so I do NOT create it in target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/200aeb97-0369-295a-4c68-b87173b1b7aa/hadoop.tmp.dir so I do NOT create it in target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e, deleteOnExit=true 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/test.cache.data in system properties and HBase conf 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir in system properties and HBase conf 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T10:12:52,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T10:12:52,027 DEBUG [Time-limited test {}] 
fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-06T10:12:52,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:12:52,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:12:52,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T10:12:52,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:12:52,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T10:12:52,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T10:12:52,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:12:52,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:12:52,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T10:12:52,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/nfs.dump.dir in system properties and HBase conf 2024-12-06T10:12:52,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/java.io.tmpdir in system properties and HBase conf 2024-12-06T10:12:52,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:12:52,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T10:12:52,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T10:12:52,041 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:12:52,108 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:12:52,113 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:12:52,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:12:52,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:12:52,117 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:12:52,118 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:12:52,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a7064e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:12:52,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75e986b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:12:52,234 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b6d77e9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/java.io.tmpdir/jetty-localhost-35987-hadoop-hdfs-3_4_1-tests_jar-_-any-14550109915604179744/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:12:52,235 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4af4f66f{HTTP/1.1, (http/1.1)}{localhost:35987} 2024-12-06T10:12:52,235 INFO [Time-limited test {}] server.Server(415): Started @173526ms 2024-12-06T10:12:52,248 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:12:52,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:12:52,313 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:12:52,316 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:12:52,317 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:12:52,317 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:12:52,317 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:12:52,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69cb648{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:12:52,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60c99d70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:12:52,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@af01be3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/java.io.tmpdir/jetty-localhost-43795-hadoop-hdfs-3_4_1-tests_jar-_-any-17673024076283175869/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:52,433 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e38b9fb{HTTP/1.1, (http/1.1)}{localhost:43795} 2024-12-06T10:12:52,433 INFO [Time-limited test {}] server.Server(415): Started @173724ms 2024-12-06T10:12:52,434 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:12:52,467 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:12:52,470 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:12:52,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:12:52,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:12:52,471 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:12:52,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@343e9b72{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:12:52,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12436641{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:12:52,517 WARN [Thread-927 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data1/current/BP-1837903265-172.17.0.2-1733479972059/current, will proceed with Du for space computation calculation, 2024-12-06T10:12:52,517 WARN [Thread-928 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data2/current/BP-1837903265-172.17.0.2-1733479972059/current, will proceed with Du for space computation calculation, 2024-12-06T10:12:52,540 WARN [Thread-906 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T10:12:52,543 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25e3c37f041c8d2e with lease ID 0x3a716a104bf932ad: Processing first storage report for DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a from datanode DatanodeRegistration(127.0.0.1:35875, datanodeUuid=5ce381aa-be4d-4717-bb1c-4292513b0537, infoPort=41709, infoSecurePort=0, ipcPort=33025, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059) 2024-12-06T10:12:52,543 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25e3c37f041c8d2e with lease ID 0x3a716a104bf932ad: from storage DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a node DatanodeRegistration(127.0.0.1:35875, datanodeUuid=5ce381aa-be4d-4717-bb1c-4292513b0537, infoPort=41709, infoSecurePort=0, ipcPort=33025, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:12:52,543 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25e3c37f041c8d2e with lease ID 0x3a716a104bf932ad: Processing first storage report for DS-fbe484ac-2101-4132-baf5-b9b1360e2bb2 from datanode DatanodeRegistration(127.0.0.1:35875, datanodeUuid=5ce381aa-be4d-4717-bb1c-4292513b0537, infoPort=41709, infoSecurePort=0, ipcPort=33025, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059) 2024-12-06T10:12:52,543 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25e3c37f041c8d2e with lease ID 0x3a716a104bf932ad: from storage DS-fbe484ac-2101-4132-baf5-b9b1360e2bb2 node DatanodeRegistration(127.0.0.1:35875, datanodeUuid=5ce381aa-be4d-4717-bb1c-4292513b0537, infoPort=41709, infoSecurePort=0, ipcPort=33025, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:12:52,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@407bc9dd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/java.io.tmpdir/jetty-localhost-38617-hadoop-hdfs-3_4_1-tests_jar-_-any-9056424202586572658/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:12:52,593 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e862ae5{HTTP/1.1, (http/1.1)}{localhost:38617} 2024-12-06T10:12:52,594 INFO [Time-limited test {}] server.Server(415): Started @173885ms 2024-12-06T10:12:52,595 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-06T10:12:52,677 WARN [Thread-954 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data4/current/BP-1837903265-172.17.0.2-1733479972059/current, will proceed with Du for space computation calculation, 2024-12-06T10:12:52,677 WARN [Thread-953 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data3/current/BP-1837903265-172.17.0.2-1733479972059/current, will proceed with Du for space computation calculation, 2024-12-06T10:12:52,696 WARN [Thread-942 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T10:12:52,698 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb74e3c20b0359c5e with lease ID 0x3a716a104bf932ae: Processing first storage report for DS-ce785a61-022d-4529-b138-17b96cbc715b from datanode DatanodeRegistration(127.0.0.1:35385, datanodeUuid=1649f975-9c26-4aac-a105-dd32c7c565d5, infoPort=46787, infoSecurePort=0, ipcPort=37253, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059) 2024-12-06T10:12:52,698 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb74e3c20b0359c5e with lease ID 0x3a716a104bf932ae: from storage DS-ce785a61-022d-4529-b138-17b96cbc715b node DatanodeRegistration(127.0.0.1:35385, datanodeUuid=1649f975-9c26-4aac-a105-dd32c7c565d5, infoPort=46787, infoSecurePort=0, ipcPort=37253, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:12:52,698 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb74e3c20b0359c5e with lease ID 0x3a716a104bf932ae: Processing first storage report for DS-b6537024-efe5-41dd-a6b6-da932ff525f2 from datanode DatanodeRegistration(127.0.0.1:35385, datanodeUuid=1649f975-9c26-4aac-a105-dd32c7c565d5, infoPort=46787, infoSecurePort=0, ipcPort=37253, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059) 2024-12-06T10:12:52,698 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb74e3c20b0359c5e with lease ID 0x3a716a104bf932ae: from storage DS-b6537024-efe5-41dd-a6b6-da932ff525f2 node DatanodeRegistration(127.0.0.1:35385, datanodeUuid=1649f975-9c26-4aac-a105-dd32c7c565d5, infoPort=46787, infoSecurePort=0, ipcPort=37253, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:12:52,717 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2 2024-12-06T10:12:52,722 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/zookeeper_0, clientPort=54272, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T10:12:52,723 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=54272 2024-12-06T10:12:52,724 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:12:52,725 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:12:52,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35385 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:12:52,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:12:52,737 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb with version=8 2024-12-06T10:12:52,737 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/hbase-staging 2024-12-06T10:12:52,739 INFO [Time-limited test {}] client.ConnectionUtils(129): master/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:12:52,740 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:12:52,740 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:12:52,740 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:12:52,740 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:12:52,740 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:12:52,740 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:12:52,740 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:12:52,741 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:32833 2024-12-06T10:12:52,742 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:12:52,743 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:12:52,746 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:32833 connecting to ZooKeeper ensemble=127.0.0.1:54272 2024-12-06T10:12:52,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:328330x0, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:12:52,756 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32833-0x10066d0f8560000 connected 2024-12-06T10:12:52,769 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:12:52,769 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:12:52,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:12:52,770 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32833 2024-12-06T10:12:52,770 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32833 2024-12-06T10:12:52,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32833 2024-12-06T10:12:52,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32833 2024-12-06T10:12:52,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32833 2024-12-06T10:12:52,771 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb, hbase.cluster.distributed=false 2024-12-06T10:12:52,788 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:12:52,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:12:52,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:12:52,788 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:12:52,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:12:52,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:12:52,788 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:12:52,788 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:12:52,789 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44863 2024-12-06T10:12:52,789 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T10:12:52,790 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T10:12:52,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:12:52,793 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:12:52,795 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44863 connecting to ZooKeeper ensemble=127.0.0.1:54272 2024-12-06T10:12:52,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:448630x0, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:12:52,798 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:448630x0, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:12:52,798 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44863-0x10066d0f8560001 connected 2024-12-06T10:12:52,799 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:12:52,799 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:12:52,799 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44863 2024-12-06T10:12:52,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44863 2024-12-06T10:12:52,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44863 2024-12-06T10:12:52,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44863 2024-12-06T10:12:52,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44863 2024-12-06T10:12:52,801 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/552d6a33fa09,32833,1733479972739 2024-12-06T10:12:52,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:12:52,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:12:52,803 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/552d6a33fa09,32833,1733479972739 2024-12-06T10:12:52,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:12:52,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:52,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:12:52,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:52,805 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:12:52,806 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/552d6a33fa09,32833,1733479972739 from backup master directory 2024-12-06T10:12:52,806 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:12:52,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/552d6a33fa09,32833,1733479972739 2024-12-06T10:12:52,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:12:52,807 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be 
cleared on crash by start scripts (Longer MTTR!) 2024-12-06T10:12:52,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:12:52,808 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=552d6a33fa09,32833,1733479972739 2024-12-06T10:12:52,814 DEBUG [M:0;552d6a33fa09:32833 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;552d6a33fa09:32833 2024-12-06T10:12:52,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:12:52,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35385 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:12:52,822 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/hbase.id with ID: 39c65da9-388d-4864-b300-3c242b9df94b 2024-12-06T10:12:52,835 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:12:52,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:52,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:52,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:12:52,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35385 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:12:52,850 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 
'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:12:52,851 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T10:12:52,851 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:12:52,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35385 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:12:52,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:12:52,860 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store 2024-12-06T10:12:52,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:12:52,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35385 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:12:52,867 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:12:52,867 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:12:52,867 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:12:52,867 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:12:52,867 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:12:52,867 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:12:52,867 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:12:52,867 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:12:52,868 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/.initializing 2024-12-06T10:12:52,868 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739 2024-12-06T10:12:52,872 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C32833%2C1733479972739, suffix=, logDir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739, archiveDir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/oldWALs, maxLogs=10 2024-12-06T10:12:52,872 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C32833%2C1733479972739.1733479972872 2024-12-06T10:12:52,877 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739/552d6a33fa09%2C32833%2C1733479972739.1733479972872 2024-12-06T10:12:52,877 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41709:41709),(127.0.0.1/127.0.0.1:46787:46787)] 2024-12-06T10:12:52,877 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:12:52,877 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:12:52,878 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:12:52,878 DEBUG [master/552d6a33fa09:0:becomeActiveMaster 
{}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:12:52,879 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:12:52,880 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T10:12:52,880 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:52,881 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:12:52,881 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:12:52,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T10:12:52,882 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:52,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:12:52,883 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:12:52,884 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T10:12:52,884 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:52,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:12:52,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:12:52,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T10:12:52,886 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:52,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:12:52,887 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:12:52,887 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:12:52,889 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T10:12:52,891 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:12:52,893 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:12:52,893 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847393, jitterRate=0.07751607894897461}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T10:12:52,894 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:12:52,895 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T10:12:52,898 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@218da945, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:12:52,899 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T10:12:52,899 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T10:12:52,899 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T10:12:52,899 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
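(Annotation, not part of the log.) The entries above trace the usual mini-cluster bring-up that HBaseTestingUtility performs for a test: a mini DFS, a MiniZooKeeperCluster on an ephemeral client port, then an HMaster whose local "master:store" region and RegionProcedureStore are created before it goes active. For orientation only, here is a minimal test-harness sketch that produces this kind of startup sequence; the HBaseTestingUtility methods are the public branch-2 API, while the table name and column family are hypothetical.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    // Starts a mini DFS, a MiniZooKeeperCluster on a random client port,
    // one HMaster and one HRegionServer, producing startup logs like those above.
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      // Hypothetical table, just to exercise the cluster once it is up.
      Table table = util.createTable(TableName.valueOf("sketch"), "cf");
      table.close();
    } finally {
      // Tears down the region server, master, ZooKeeper and DFS.
      util.shutdownMiniCluster();
    }
  }
}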
2024-12-06T10:12:52,900 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T10:12:52,900 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-06T10:12:52,900 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T10:12:52,902 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T10:12:52,903 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T10:12:52,904 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T10:12:52,905 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T10:12:52,905 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T10:12:52,906 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T10:12:52,906 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T10:12:52,907 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T10:12:52,908 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T10:12:52,909 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T10:12:52,910 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T10:12:52,911 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T10:12:52,912 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T10:12:52,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/running 2024-12-06T10:12:52,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T10:12:52,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:52,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:52,914 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=552d6a33fa09,32833,1733479972739, sessionid=0x10066d0f8560000, setting cluster-up flag (Was=false) 2024-12-06T10:12:52,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:52,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:52,921 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T10:12:52,922 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,32833,1733479972739 2024-12-06T10:12:52,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:52,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:52,929 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T10:12:52,930 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,32833,1733479972739 2024-12-06T10:12:52,932 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T10:12:52,933 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T10:12:52,933 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, 
TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T10:12:52,933 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 552d6a33fa09,32833,1733479972739 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T10:12:52,933 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:12:52,933 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:12:52,933 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:12:52,933 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:12:52,933 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/552d6a33fa09:0, corePoolSize=10, maxPoolSize=10 2024-12-06T10:12:52,934 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:52,934 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:12:52,934 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:52,935 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733480002935 2024-12-06T10:12:52,935 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T10:12:52,935 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T10:12:52,935 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T10:12:52,935 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T10:12:52,935 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T10:12:52,935 INFO 
[master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T10:12:52,935 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:52,935 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:12:52,935 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T10:12:52,936 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T10:12:52,936 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T10:12:52,936 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T10:12:52,936 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T10:12:52,936 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T10:12:52,936 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733479972936,5,FailOnTimeoutGroup] 2024-12-06T10:12:52,937 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:52,937 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:12:52,939 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733479972936,5,FailOnTimeoutGroup] 2024-12-06T10:12:52,939 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:52,939 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T10:12:52,939 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:52,939 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:52,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35385 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:12:52,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:12:52,947 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T10:12:52,947 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb 2024-12-06T10:12:52,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:12:52,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35385 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:12:52,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:12:52,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:12:52,961 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:12:52,961 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:52,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:12:52,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:12:52,964 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:12:52,964 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:52,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:12:52,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:12:52,966 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:12:52,966 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:52,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:12:52,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/hbase/meta/1588230740 2024-12-06T10:12:52,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/hbase/meta/1588230740 2024-12-06T10:12:52,970 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:12:52,971 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:12:52,974 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:12:52,974 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=706134, jitterRate=-0.10210521519184113}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:12:52,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:12:52,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:12:52,974 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:12:52,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:12:52,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:12:52,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:12:52,975 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T10:12:52,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:12:52,976 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:12:52,976 INFO [PEWorker-1 
{}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T10:12:52,976 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T10:12:52,978 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T10:12:52,978 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T10:12:53,013 DEBUG [RS:0;552d6a33fa09:44863 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;552d6a33fa09:44863 2024-12-06T10:12:53,015 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1008): ClusterId : 39c65da9-388d-4864-b300-3c242b9df94b 2024-12-06T10:12:53,015 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T10:12:53,017 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T10:12:53,017 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T10:12:53,019 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T10:12:53,019 DEBUG [RS:0;552d6a33fa09:44863 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cf46fe2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:12:53,020 DEBUG [RS:0;552d6a33fa09:44863 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3da51933, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:12:53,020 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T10:12:53,020 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T10:12:53,020 DEBUG [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1090): About to register with Master. 
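(Annotation, not part of the log.) At this point the region server has its ClusterId and is about to register with the master; the next entries show reportForDuty and ServerManager registering 552d6a33fa09,44863. A test can observe the same membership through the Admin API once the cluster is up. The sketch below is illustrative and reuses the standard HBaseTestingUtility/Admin calls; nothing here is taken from the test that produced this log.

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

public class LiveServersSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      // Shared Admin owned by the testing utility; it is closed on cluster shutdown.
      Admin admin = util.getAdmin();
      ClusterMetrics metrics = admin.getClusterMetrics();
      // After reportForDuty/registration completes, one live region server is expected.
      for (ServerName sn : metrics.getLiveServerMetrics().keySet()) {
        System.out.println("live region server: " + sn);
      }
    } finally {
      util.shutdownMiniCluster();
    }
  }
}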
2024-12-06T10:12:53,020 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(3073): reportForDuty to master=552d6a33fa09,32833,1733479972739 with isa=552d6a33fa09/172.17.0.2:44863, startcode=1733479972787 2024-12-06T10:12:53,020 DEBUG [RS:0;552d6a33fa09:44863 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:12:53,027 INFO [RS-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56361, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:12:53,028 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32833 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,028 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32833 {}] master.ServerManager(486): Registering regionserver=552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,030 DEBUG [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb 2024-12-06T10:12:53,030 DEBUG [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:36887 2024-12-06T10:12:53,030 DEBUG [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T10:12:53,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:12:53,032 DEBUG [RS:0;552d6a33fa09:44863 {}] zookeeper.ZKUtil(111): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,032 WARN [RS:0;552d6a33fa09:44863 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T10:12:53,032 INFO [RS:0;552d6a33fa09:44863 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:12:53,032 DEBUG [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,032 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [552d6a33fa09,44863,1733479972787] 2024-12-06T10:12:53,035 DEBUG [RS:0;552d6a33fa09:44863 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T10:12:53,036 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T10:12:53,037 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T10:12:53,038 INFO [RS:0;552d6a33fa09:44863 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T10:12:53,038 INFO [RS:0;552d6a33fa09:44863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:53,038 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T10:12:53,039 INFO [RS:0;552d6a33fa09:44863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
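[Editor's note] The regionserver above instantiates the FSHLog-based WAL provider and reports its global memstore limits; shortly after, the WAL is created with blocksize=256 MB, rollsize=128 MB, maxLogs=32. A small sketch of the standard configuration keys that, as an assumption, correspond to those figures (the test presumably sets them on the mini-cluster configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalAndMemstoreConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects FSHLogProvider, the WAL implementation logged above.
    conf.set("hbase.wal.provider", "filesystem");
    // A WAL rolls once it reaches blocksize * multiplier; maxlogs caps the backlog.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    conf.setInt("hbase.regionserver.maxlogs", 32);
    // Global memstore limit as a fraction of heap; the 880 M / 836 M figures in
    // the log are derived from this fraction and the test JVM's heap size.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    return conf;
  }
}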
2024-12-06T10:12:53,039 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:53,039 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:53,039 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:53,039 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:53,039 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:53,039 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:12:53,039 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:53,040 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:53,040 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:53,040 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:53,040 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:12:53,040 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:12:53,040 DEBUG [RS:0;552d6a33fa09:44863 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:12:53,044 INFO [RS:0;552d6a33fa09:44863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:53,044 INFO [RS:0;552d6a33fa09:44863 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:53,044 INFO [RS:0;552d6a33fa09:44863 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:53,044 INFO [RS:0;552d6a33fa09:44863 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:53,044 INFO [RS:0;552d6a33fa09:44863 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,44863,1733479972787-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
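[Editor's note] The executor pools and ScheduledChore registrations above (CompactionChecker, MemstoreFlusherChore, BrokenStoreFileCleaner, ...) all go through HBase's ChoreService. As an illustration only, a minimal sketch of that chore pattern; the chore name, period, and body below are hypothetical, not taken from the test.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable; real daemons pass the server instance here.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    ChoreService service = new ChoreService("demo");
    // Runs every 1000 ms, like the CompactionChecker chore enabled above.
    ScheduledChore chore = new ScheduledChore("DemoChecker", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("periodic work would run here");
      }
    };
    service.scheduleChore(chore);

    Thread.sleep(3000);
    service.shutdown();
  }
}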
2024-12-06T10:12:53,067 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T10:12:53,068 INFO [RS:0;552d6a33fa09:44863 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,44863,1733479972787-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:53,085 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.Replication(204): 552d6a33fa09,44863,1733479972787 started 2024-12-06T10:12:53,086 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1767): Serving as 552d6a33fa09,44863,1733479972787, RpcServer on 552d6a33fa09/172.17.0.2:44863, sessionid=0x10066d0f8560001 2024-12-06T10:12:53,086 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T10:12:53,086 DEBUG [RS:0;552d6a33fa09:44863 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,086 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,44863,1733479972787' 2024-12-06T10:12:53,086 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T10:12:53,086 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T10:12:53,087 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T10:12:53,087 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T10:12:53,087 DEBUG [RS:0;552d6a33fa09:44863 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,087 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,44863,1733479972787' 2024-12-06T10:12:53,087 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T10:12:53,087 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T10:12:53,087 DEBUG [RS:0;552d6a33fa09:44863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T10:12:53,087 INFO [RS:0;552d6a33fa09:44863 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T10:12:53,087 INFO [RS:0;552d6a33fa09:44863 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T10:12:53,129 WARN [552d6a33fa09:32833 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-06T10:12:53,189 INFO [RS:0;552d6a33fa09:44863 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C44863%2C1733479972787, suffix=, logDir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787, archiveDir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/oldWALs, maxLogs=32 2024-12-06T10:12:53,190 INFO [RS:0;552d6a33fa09:44863 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C44863%2C1733479972787.1733479973190 2024-12-06T10:12:53,197 INFO [RS:0;552d6a33fa09:44863 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 2024-12-06T10:12:53,197 DEBUG [RS:0;552d6a33fa09:44863 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41709:41709),(127.0.0.1/127.0.0.1:46787:46787)] 2024-12-06T10:12:53,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:12:53,379 DEBUG [552d6a33fa09:32833 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T10:12:53,379 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,381 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,44863,1733479972787, state=OPENING 2024-12-06T10:12:53,382 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T10:12:53,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:53,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:53,384 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=552d6a33fa09,44863,1733479972787}] 2024-12-06T10:12:53,384 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:12:53,384 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:12:53,536 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,536 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T10:12:53,538 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T10:12:53,542 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T10:12:53,542 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:12:53,544 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C44863%2C1733479972787.meta, suffix=.meta, logDir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787, archiveDir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/oldWALs, maxLogs=32 2024-12-06T10:12:53,545 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C44863%2C1733479972787.meta.1733479973545.meta 2024-12-06T10:12:53,550 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.meta.1733479973545.meta 2024-12-06T10:12:53,550 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46787:46787),(127.0.0.1/127.0.0.1:41709:41709)] 2024-12-06T10:12:53,551 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:12:53,551 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T10:12:53,551 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T10:12:53,551 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T10:12:53,551 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T10:12:53,551 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:12:53,551 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T10:12:53,551 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T10:12:53,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:12:53,554 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:12:53,554 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
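[Editor's note] The CompactionConfiguration line above echoes the effective compaction settings for the meta region's column families (min/max files, ratios, throttle point, major period and jitter). A sketch of the configuration keys that, as an assumption, feed those numbers; the values shown are the ones printed in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // File-count bounds for a single minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Selection ratios: normal vs. off-peak hours.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // Compactions larger than this go to the "large" compaction pool
    // (the "throttle point 2684354560" figure above).
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);
    // Major compaction period (7 days) and its jitter.
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    return conf;
  }
}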
2024-12-06T10:12:53,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:12:53,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:12:53,556 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:12:53,556 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:53,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:12:53,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:12:53,557 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:12:53,557 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:53,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:12:53,558 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) 
under hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/hbase/meta/1588230740 2024-12-06T10:12:53,559 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/hbase/meta/1588230740 2024-12-06T10:12:53,561 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:12:53,562 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:12:53,563 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818298, jitterRate=0.04052001237869263}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:12:53,563 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:12:53,564 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733479973536 2024-12-06T10:12:53,566 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T10:12:53,566 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T10:12:53,567 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,568 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,44863,1733479972787, state=OPEN 2024-12-06T10:12:53,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:12:53,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:12:53,572 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:12:53,572 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:12:53,574 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T10:12:53,574 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, 
state=SUCCESS; OpenRegionProcedure 1588230740, server=552d6a33fa09,44863,1733479972787 in 188 msec 2024-12-06T10:12:53,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T10:12:53,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 598 msec 2024-12-06T10:12:53,578 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 646 msec 2024-12-06T10:12:53,578 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733479973578, completionTime=-1 2024-12-06T10:12:53,578 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T10:12:53,578 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T10:12:53,579 DEBUG [hconnection-0x1e700ca0-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:12:53,580 INFO [RS-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44208, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:12:53,581 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T10:12:53,581 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733480033581 2024-12-06T10:12:53,581 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733480093581 2024-12-06T10:12:53,582 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-06T10:12:53,587 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32833,1733479972739-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:53,587 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32833,1733479972739-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:53,587 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32833,1733479972739-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:53,587 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-552d6a33fa09:32833, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:53,587 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T10:12:53,587 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. 
Creating... 2024-12-06T10:12:53,587 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:12:53,588 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T10:12:53,588 DEBUG [master/552d6a33fa09:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T10:12:53,589 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:12:53,589 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:53,590 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:12:53,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35385 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:12:53,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:12:53,599 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4c3725ae98335711a0d217b13705573b, NAME => 'hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb 2024-12-06T10:12:53,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35385 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:12:53,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:12:53,607 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:12:53,607 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 4c3725ae98335711a0d217b13705573b, disabling compactions & flushes 2024-12-06T10:12:53,607 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:12:53,607 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:12:53,607 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. after waiting 0 ms 2024-12-06T10:12:53,607 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:12:53,607 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:12:53,607 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4c3725ae98335711a0d217b13705573b: 2024-12-06T10:12:53,608 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:12:53,609 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733479973608"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733479973608"}]},"ts":"1733479973608"} 2024-12-06T10:12:53,611 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T10:12:53,612 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:12:53,612 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479973612"}]},"ts":"1733479973612"} 2024-12-06T10:12:53,614 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T10:12:53,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4c3725ae98335711a0d217b13705573b, ASSIGN}] 2024-12-06T10:12:53,618 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4c3725ae98335711a0d217b13705573b, ASSIGN 2024-12-06T10:12:53,619 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=4c3725ae98335711a0d217b13705573b, ASSIGN; state=OFFLINE, location=552d6a33fa09,44863,1733479972787; forceNewPlan=false, retain=false 2024-12-06T10:12:53,770 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4c3725ae98335711a0d217b13705573b, regionState=OPENING, regionLocation=552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 4c3725ae98335711a0d217b13705573b, server=552d6a33fa09,44863,1733479972787}] 2024-12-06T10:12:53,924 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,928 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:12:53,928 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 4c3725ae98335711a0d217b13705573b, NAME => 'hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:12:53,928 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 4c3725ae98335711a0d217b13705573b 2024-12-06T10:12:53,929 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:12:53,929 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 4c3725ae98335711a0d217b13705573b 2024-12-06T10:12:53,929 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 4c3725ae98335711a0d217b13705573b 2024-12-06T10:12:53,930 INFO [StoreOpener-4c3725ae98335711a0d217b13705573b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4c3725ae98335711a0d217b13705573b 2024-12-06T10:12:53,932 INFO [StoreOpener-4c3725ae98335711a0d217b13705573b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c3725ae98335711a0d217b13705573b columnFamilyName info 2024-12-06T10:12:53,932 DEBUG [StoreOpener-4c3725ae98335711a0d217b13705573b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:53,932 INFO [StoreOpener-4c3725ae98335711a0d217b13705573b-1 {}] regionserver.HStore(327): Store=4c3725ae98335711a0d217b13705573b/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:12:53,933 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/hbase/namespace/4c3725ae98335711a0d217b13705573b 2024-12-06T10:12:53,933 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/hbase/namespace/4c3725ae98335711a0d217b13705573b 2024-12-06T10:12:53,935 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 4c3725ae98335711a0d217b13705573b 2024-12-06T10:12:53,937 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/hbase/namespace/4c3725ae98335711a0d217b13705573b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:12:53,938 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 4c3725ae98335711a0d217b13705573b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748264, jitterRate=-0.04853345453739166}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:12:53,938 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 4c3725ae98335711a0d217b13705573b: 2024-12-06T10:12:53,939 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b., pid=6, masterSystemTime=1733479973924 2024-12-06T10:12:53,941 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:12:53,941 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 
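[Editor's note] The create-table entries above print the column family schema used for hbase:namespace (VERSIONS => '10', IN_MEMORY => 'true', BLOCKSIZE => '8192', BLOOMFILTER => 'ROW'). One would not create hbase:namespace by hand, so purely as a client-side illustration, here is a sketch that builds an equivalent descriptor for a hypothetical user table and creates it through the Admin API; the table name is invented.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  // 'conn' is an already-open Connection; the table name is hypothetical.
  static void create(Connection conn) throws Exception {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(10)        // VERSIONS => '10'
        .setInMemory(true)         // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)    // BLOCKSIZE => '8192'
        .setBloomFilterType(BloomType.ROW)
        .build();
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("small_table"))
        .setColumnFamily(info)
        .build();
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(td);
    }
  }
}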
2024-12-06T10:12:53,941 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4c3725ae98335711a0d217b13705573b, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,44863,1733479972787 2024-12-06T10:12:53,946 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T10:12:53,946 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 4c3725ae98335711a0d217b13705573b, server=552d6a33fa09,44863,1733479972787 in 171 msec 2024-12-06T10:12:53,948 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T10:12:53,948 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=4c3725ae98335711a0d217b13705573b, ASSIGN in 329 msec 2024-12-06T10:12:53,949 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:12:53,949 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479973949"}]},"ts":"1733479973949"} 2024-12-06T10:12:53,951 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T10:12:53,953 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:12:53,955 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 366 msec 2024-12-06T10:12:53,989 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T10:12:53,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:53,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:12:53,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:12:53,995 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T10:12:54,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:12:54,007 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 12 msec 2024-12-06T10:12:54,017 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T10:12:54,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:12:54,029 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 10 msec 2024-12-06T10:12:54,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T10:12:54,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T10:12:54,044 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.236sec 2024-12-06T10:12:54,044 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T10:12:54,044 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T10:12:54,044 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T10:12:54,045 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T10:12:54,045 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T10:12:54,045 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32833,1733479972739-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T10:12:54,045 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32833,1733479972739-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T10:12:54,046 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T10:12:54,047 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T10:12:54,047 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32833,1733479972739-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
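[Editor's note] The CreateNamespaceProcedure entries above (pid=7 for "default", pid=8 for "hbase") are the master bootstrapping its built-in namespaces. For context, a brief sketch of the client call that drives the same procedure for a user namespace; the namespace name below is hypothetical.

import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class NamespaceSketch {
  static void createNamespace(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // Submits a CreateNamespaceProcedure on the master, like pid=7/pid=8 above.
      admin.createNamespace(NamespaceDescriptor.create("demo").build());
    }
  }
}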
2024-12-06T10:12:54,103 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x040e5b4c to 127.0.0.1:54272 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d6f335 2024-12-06T10:12:54,107 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@227fb64a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:12:54,109 DEBUG [hconnection-0x1507a66b-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:12:54,111 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44224, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:12:54,112 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=552d6a33fa09,32833,1733479972739 2024-12-06T10:12:54,113 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:12:54,116 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T10:12:54,116 INFO [Time-limited test {}] wal.TestLogRolling(297): Starting testLogRollOnPipelineRestart 2024-12-06T10:12:54,116 INFO [Time-limited test {}] wal.TestLogRolling(300): Replication=2 2024-12-06T10:12:54,117 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:12:54,119 INFO [RS-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44348, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:12:54,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T10:12:54,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
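[Editor's note] The two TableDescriptorChecker warnings above fire because the test shrinks the region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes, WAL rolls, and splits happen quickly during testLogRollOnPipelineRestart. A sketch of setting those deliberately tiny limits; the keys and values are the ones quoted in the warnings, the rest is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyRegionLimitsSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Deliberately tiny limits; fine for a test, unreasonable in production,
    // hence the TableDescriptorChecker warnings in the log.
    conf.setLong("hbase.hregion.max.filesize", 786432);      // 768 KB
    conf.setLong("hbase.hregion.memstore.flush.size", 8192); // 8 KB
    return conf;
  }
}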
2024-12-06T10:12:54,120 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:12:54,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-06T10:12:54,122 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:12:54,123 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:54,123 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 9 2024-12-06T10:12:54,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:12:54,123 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:12:54,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741837_1013 (size=395) 2024-12-06T10:12:54,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35385 is added to blk_1073741837_1013 (size=395) 2024-12-06T10:12:54,133 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2dc4d1dea09061f7ad95dc80de94e4aa, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb 2024-12-06T10:12:54,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741838_1014 (size=78) 2024-12-06T10:12:54,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35385 is added to blk_1073741838_1014 (size=78) 2024-12-06T10:12:54,140 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:12:54,140 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1681): Closing 2dc4d1dea09061f7ad95dc80de94e4aa, disabling compactions & flushes 2024-12-06T10:12:54,140 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 2024-12-06T10:12:54,140 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 2024-12-06T10:12:54,140 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. after waiting 0 ms 2024-12-06T10:12:54,140 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 2024-12-06T10:12:54,140 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 2024-12-06T10:12:54,140 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2dc4d1dea09061f7ad95dc80de94e4aa: 2024-12-06T10:12:54,142 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:12:54,142 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733479974142"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733479974142"}]},"ts":"1733479974142"} 2024-12-06T10:12:54,144 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
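At this point CREATE_TABLE_ADD_TO_META has written one row for the new region into hbase:meta, carrying info:regioninfo and info:state cells (the Put logged above). A simplified sketch, assuming the standard HBase 2.x client API, of reading that row back with a plain scan of hbase:meta; the prefix-based start row is illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaRowCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // Meta row keys look like "<table>,<startkey>,<timestamp>.<encoded-name>."
          Scan scan = new Scan()
              .withStartRow(Bytes.toBytes("TestLogRolling-testLogRollOnPipelineRestart,"))
              .setLimit(1)
              .addFamily(Bytes.toBytes("info"));
          try (ResultScanner rs = meta.getScanner(scan)) {
            for (Result r : rs) {
              byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
              System.out.println(Bytes.toString(r.getRow()) + " -> state=" + Bytes.toString(state));
            }
          }
        }
      }
    }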
2024-12-06T10:12:54,145 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:12:54,145 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479974145"}]},"ts":"1733479974145"} 2024-12-06T10:12:54,146 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-06T10:12:54,150 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=2dc4d1dea09061f7ad95dc80de94e4aa, ASSIGN}] 2024-12-06T10:12:54,151 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=2dc4d1dea09061f7ad95dc80de94e4aa, ASSIGN 2024-12-06T10:12:54,152 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=2dc4d1dea09061f7ad95dc80de94e4aa, ASSIGN; state=OFFLINE, location=552d6a33fa09,44863,1733479972787; forceNewPlan=false, retain=false 2024-12-06T10:12:54,302 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=2dc4d1dea09061f7ad95dc80de94e4aa, regionState=OPENING, regionLocation=552d6a33fa09,44863,1733479972787 2024-12-06T10:12:54,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:12:54,305 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 2dc4d1dea09061f7ad95dc80de94e4aa, server=552d6a33fa09,44863,1733479972787}] 2024-12-06T10:12:54,457 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,44863,1733479972787 2024-12-06T10:12:54,462 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 2024-12-06T10:12:54,462 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 2dc4d1dea09061f7ad95dc80de94e4aa, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:12:54,462 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 2dc4d1dea09061f7ad95dc80de94e4aa 2024-12-06T10:12:54,462 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:12:54,462 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 2dc4d1dea09061f7ad95dc80de94e4aa 2024-12-06T10:12:54,463 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 2dc4d1dea09061f7ad95dc80de94e4aa 2024-12-06T10:12:54,465 INFO [StoreOpener-2dc4d1dea09061f7ad95dc80de94e4aa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2dc4d1dea09061f7ad95dc80de94e4aa 2024-12-06T10:12:54,466 INFO [StoreOpener-2dc4d1dea09061f7ad95dc80de94e4aa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2dc4d1dea09061f7ad95dc80de94e4aa columnFamilyName info 2024-12-06T10:12:54,466 DEBUG [StoreOpener-2dc4d1dea09061f7ad95dc80de94e4aa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:12:54,467 INFO [StoreOpener-2dc4d1dea09061f7ad95dc80de94e4aa-1 {}] regionserver.HStore(327): Store=2dc4d1dea09061f7ad95dc80de94e4aa/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:12:54,467 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/default/TestLogRolling-testLogRollOnPipelineRestart/2dc4d1dea09061f7ad95dc80de94e4aa 2024-12-06T10:12:54,468 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/default/TestLogRolling-testLogRollOnPipelineRestart/2dc4d1dea09061f7ad95dc80de94e4aa 2024-12-06T10:12:54,470 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 2dc4d1dea09061f7ad95dc80de94e4aa 2024-12-06T10:12:54,472 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/default/TestLogRolling-testLogRollOnPipelineRestart/2dc4d1dea09061f7ad95dc80de94e4aa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:12:54,472 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 2dc4d1dea09061f7ad95dc80de94e4aa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797597, jitterRate=0.014197215437889099}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:12:54,473 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 2dc4d1dea09061f7ad95dc80de94e4aa: 2024-12-06T10:12:54,474 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa., pid=11, masterSystemTime=1733479974457 2024-12-06T10:12:54,476 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 2024-12-06T10:12:54,476 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 
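The split-policy numbers printed above fit together: ConstantSizeRegionSplitPolicy starts from the configured hbase.hregion.max.filesize (786432, per the earlier warning) and adds a small random jitter, and the printed jitterRate of 0.014197215437889099 yields exactly the printed desiredMaxFileSize. A tiny illustrative check, assuming the jitter is applied multiplicatively to the configured size (which these numbers are consistent with):

    public class SplitSizeJitterCheck {
      public static void main(String[] args) {
        long configuredMaxFileSize = 786_432L;     // hbase.hregion.max.filesize (see warning above)
        double jitterRate = 0.014197215437889099;  // value printed by ConstantSizeRegionSplitPolicy
        // 786432 * (1 + 0.0141972...) ≈ 797597.6, truncated to a long
        long desiredMaxFileSize = configuredMaxFileSize + (long) (configuredMaxFileSize * jitterRate);
        System.out.println(desiredMaxFileSize);    // prints 797597, matching the log line
      }
    }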
2024-12-06T10:12:54,477 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=2dc4d1dea09061f7ad95dc80de94e4aa, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,44863,1733479972787 2024-12-06T10:12:54,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T10:12:54,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 2dc4d1dea09061f7ad95dc80de94e4aa, server=552d6a33fa09,44863,1733479972787 in 173 msec 2024-12-06T10:12:54,483 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T10:12:54,483 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=2dc4d1dea09061f7ad95dc80de94e4aa, ASSIGN in 331 msec 2024-12-06T10:12:54,483 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:12:54,484 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733479974484"}]},"ts":"1733479974484"} 2024-12-06T10:12:54,485 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-06T10:12:54,488 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:12:54,489 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 368 msec 2024-12-06T10:12:55,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-06T10:12:55,196 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-06T10:12:55,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-06T10:12:55,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:12:56,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:12:57,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:12:58,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:12:59,053 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T10:12:59,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:59,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:59,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:59,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:59,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:59,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:59,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:59,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:12:59,090 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-06T10:12:59,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:00,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:01,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:02,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:03,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:13:04,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:13:04,125 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart, procId: 9 completed 2024-12-06T10:13:04,128 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-06T10:13:04,128 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 2024-12-06T10:13:04,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:05,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:06,134 INFO [Time-limited test {}] wal.TestLogRolling(337): log.getCurrentFileName()): hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 2024-12-06T10:13:06,135 WARN [ResponseProcessor for block BP-1837903265-172.17.0.2-1733479972059:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1837903265-172.17.0.2-1733479972059:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:06,135 WARN [ResponseProcessor for block BP-1837903265-172.17.0.2-1733479972059:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1837903265-172.17.0.2-1733479972059:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1837903265-172.17.0.2-1733479972059:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:35385,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
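The WARN lines repeating roughly once per second above ("Failed invocation ... Caused by: java.io.IOException: Filesystem closed") come from the old WAL writer's close path: RecoverLeaseFSUtils keeps retrying HDFS lease recovery on a meta WAL apparently left over from an earlier mini-cluster instance in this run (note the different NameNode port, 45977 vs 36887), but the DFSClient it probes with has already been shut down, so every isFileClosed() check fails. A minimal sketch of the underlying recover-then-poll pattern using only public DistributedFileSystem calls; this illustrates the pattern and is not the RecoverLeaseFSUtils implementation:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoveryExample {
      /** Ask the NameNode to recover the lease on a WAL file, then poll until it is closed. */
      static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        boolean recovered = dfs.recoverLease(wal); // often false on the first call
        while (!recovered && System.currentTimeMillis() < deadline) {
          Thread.sleep(1000L); // the log above shows ~1 s between attempts
          // This probe is the call failing above with "Filesystem closed",
          // because the DFSClient backing dfs was closed before recovery finished.
          recovered = dfs.isFileClosed(wal);
        }
        return recovered;
      }
    }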
2024-12-06T10:13:06,135 WARN [DataStreamer for file /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.meta.1733479973545.meta block BP-1837903265-172.17.0.2-1733479972059:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1837903265-172.17.0.2-1733479972059:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35385,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35385,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK]) is bad. 2024-12-06T10:13:06,135 WARN [ResponseProcessor for block BP-1837903265-172.17.0.2-1733479972059:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1837903265-172.17.0.2-1733479972059:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1837903265-172.17.0.2-1733479972059:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:35385,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:06,135 WARN [DataStreamer for file /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739/552d6a33fa09%2C32833%2C1733479972739.1733479972872 block BP-1837903265-172.17.0.2-1733479972059:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1837903265-172.17.0.2-1733479972059:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK], DatanodeInfoWithStorage[127.0.0.1:35385,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35385,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK]) is bad. 2024-12-06T10:13:06,136 WARN [DataStreamer for file /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 block BP-1837903265-172.17.0.2-1733479972059:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1837903265-172.17.0.2-1733479972059:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK], DatanodeInfoWithStorage[127.0.0.1:35385,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35385,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK]) is bad. 2024-12-06T10:13:06,136 WARN [PacketResponder: BP-1837903265-172.17.0.2-1733479972059:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35385] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:06,136 WARN [PacketResponder: BP-1837903265-172.17.0.2-1733479972059:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35385] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:06,136 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-916040266_22 at /127.0.0.1:40426 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40426 dst: /127.0.0.1:35385 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:06,136 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-916040266_22 at /127.0.0.1:51206 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51206 dst: /127.0.0.1:35875 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:13:06,136 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1780657933_22 at /127.0.0.1:51164 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51164 dst: /127.0.0.1:35875 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:06,136 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-916040266_22 at /127.0.0.1:51200 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51200 dst: /127.0.0.1:35875 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:13:06,137 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1780657933_22 at /127.0.0.1:40394 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40394 dst: /127.0.0.1:35385 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:06,137 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-916040266_22 at /127.0.0.1:40416 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40416 dst: /127.0.0.1:35385 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:06,138 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@407bc9dd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:06,139 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e862ae5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:13:06,139 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:13:06,139 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12436641{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:13:06,139 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@343e9b72{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,STOPPED} 2024-12-06T10:13:06,144 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:13:06,144 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:13:06,144 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1837903265-172.17.0.2-1733479972059 (Datanode Uuid 1649f975-9c26-4aac-a105-dd32c7c565d5) service to localhost/127.0.0.1:36887 2024-12-06T10:13:06,144 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:13:06,145 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data3/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:06,145 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data4/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:06,145 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:13:06,160 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:13:06,165 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:13:06,165 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:13:06,165 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:13:06,166 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:13:06,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42c52bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:13:06,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c1b4427{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:13:06,287 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2be51175{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/java.io.tmpdir/jetty-localhost-40351-hadoop-hdfs-3_4_1-tests_jar-_-any-18073154481230753925/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:06,287 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@545a7404{HTTP/1.1, (http/1.1)}{localhost:40351} 2024-12-06T10:13:06,287 INFO [Time-limited test {}] server.Server(415): Started @187579ms 2024-12-06T10:13:06,289 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:13:06,307 WARN [ResponseProcessor for block BP-1837903265-172.17.0.2-1733479972059:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1837903265-172.17.0.2-1733479972059:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:06,307 WARN [ResponseProcessor for block BP-1837903265-172.17.0.2-1733479972059:blk_1073741833_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1837903265-172.17.0.2-1733479972059:blk_1073741833_1017 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:06,307 WARN [ResponseProcessor for block BP-1837903265-172.17.0.2-1733479972059:blk_1073741834_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1837903265-172.17.0.2-1733479972059:blk_1073741834_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:06,308 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-916040266_22 at /127.0.0.1:48516 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48516 dst: /127.0.0.1:35875 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:06,308 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-916040266_22 at /127.0.0.1:48514 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48514 dst: /127.0.0.1:35875 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:06,308 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1780657933_22 at /127.0.0.1:48508 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48508 dst: /127.0.0.1:35875 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:06,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:06,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@af01be3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:06,313 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e38b9fb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:13:06,313 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:13:06,314 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60c99d70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:13:06,314 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69cb648{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,STOPPED} 2024-12-06T10:13:06,315 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:13:06,315 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:13:06,315 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1837903265-172.17.0.2-1733479972059 (Datanode Uuid 5ce381aa-be4d-4717-bb1c-4292513b0537) service to localhost/127.0.0.1:36887 2024-12-06T10:13:06,315 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:13:06,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data1/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:06,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data2/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:06,316 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:13:06,327 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:13:06,331 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:13:06,331 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:13:06,331 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:13:06,332 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:13:06,332 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@759924d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:13:06,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7507c3e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:13:06,393 WARN [Thread-1088 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T10:13:06,395 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc9cfad32bc9f7ec with lease ID 0x3a716a104bf932af: from storage DS-ce785a61-022d-4529-b138-17b96cbc715b node DatanodeRegistration(127.0.0.1:43741, datanodeUuid=1649f975-9c26-4aac-a105-dd32c7c565d5, infoPort=36823, infoSecurePort=0, ipcPort=41139, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:13:06,396 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc9cfad32bc9f7ec with lease ID 0x3a716a104bf932af: from storage DS-b6537024-efe5-41dd-a6b6-da932ff525f2 node DatanodeRegistration(127.0.0.1:43741, datanodeUuid=1649f975-9c26-4aac-a105-dd32c7c565d5, infoPort=36823, infoSecurePort=0, ipcPort=41139, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T10:13:06,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7fe522ec{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/java.io.tmpdir/jetty-localhost-43981-hadoop-hdfs-3_4_1-tests_jar-_-any-10813457077529732240/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:06,455 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7650d9c4{HTTP/1.1, (http/1.1)}{localhost:43981} 2024-12-06T10:13:06,455 INFO [Time-limited test {}] server.Server(415): Started @187747ms 2024-12-06T10:13:06,456 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:13:06,542 WARN [Thread-1119 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T10:13:06,545 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfebf9d8fb5a7a94e with lease ID 0x3a716a104bf932b0: from storage DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a node DatanodeRegistration(127.0.0.1:44317, datanodeUuid=5ce381aa-be4d-4717-bb1c-4292513b0537, infoPort=38531, infoSecurePort=0, ipcPort=41635, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T10:13:06,545 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfebf9d8fb5a7a94e with lease ID 0x3a716a104bf932b0: from storage DS-fbe484ac-2101-4132-baf5-b9b1360e2bb2 node DatanodeRegistration(127.0.0.1:44317, datanodeUuid=5ce381aa-be4d-4717-bb1c-4292513b0537, infoPort=38531, infoSecurePort=0, ipcPort=41635, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:13:07,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:13:07,474 INFO [Time-limited test {}] wal.TestLogRolling(349): Data Nodes restarted 2024-12-06T10:13:07,476 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-06T10:13:07,477 WARN [RS:0;552d6a33fa09:44863.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=5, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:07,477 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C44863%2C1733479972787:(num 1733479973190) roll requested 2024-12-06T10:13:07,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44863 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:13:07,478 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C44863%2C1733479972787.1733479987478 2024-12-06T10:13:07,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44863 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:44224 deadline: 1733479997477, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-06T10:13:07,483 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 newFile=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 2024-12-06T10:13:07,484 WARN [regionserver/552d6a33fa09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-06T10:13:07,484 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 with entries=5, filesize=2.09 KB; new WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 2024-12-06T10:13:07,486 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36823:36823),(127.0.0.1/127.0.0.1:38531:38531)] 2024-12-06T10:13:07,486 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:07,486 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 is not closed yet, will try archiving it next time 2024-12-06T10:13:07,486 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:07,486 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 2024-12-06T10:13:07,487 WARN [IPC Server handler 1 on default port 36887 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1017 2024-12-06T10:13:07,487 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 after 1ms 2024-12-06T10:13:08,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:09,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:10,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:10,395 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1017: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T10:13:11,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:11,488 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 after 4002ms 2024-12-06T10:13:12,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:13,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:14,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:15,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:13:16,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:17,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:18,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:19,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:19,521 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-06T10:13:20,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:21,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:21,524 WARN [ResponseProcessor for block BP-1837903265-172.17.0.2-1733479972059:blk_1073741839_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1837903265-172.17.0.2-1733479972059:blk_1073741839_1018 java.io.IOException: Bad response ERROR for BP-1837903265-172.17.0.2-1733479972059:blk_1073741839_1018 from datanode DatanodeInfoWithStorage[127.0.0.1:44317,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:13:21,524 WARN [DataStreamer for file /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 block BP-1837903265-172.17.0.2-1733479972059:blk_1073741839_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1837903265-172.17.0.2-1733479972059:blk_1073741839_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43741,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK], DatanodeInfoWithStorage[127.0.0.1:44317,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44317,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]) is bad. 2024-12-06T10:13:21,524 WARN [PacketResponder: BP-1837903265-172.17.0.2-1733479972059:blk_1073741839_1018, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44317] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:21,525 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-916040266_22 at /127.0.0.1:39914 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:43741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39914 dst: /127.0.0.1:43741 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:21,525 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-916040266_22 at /127.0.0.1:48868 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48868 dst: /127.0.0.1:44317 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:13:21,526 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7fe522ec{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:21,526 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7650d9c4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:13:21,527 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:13:21,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7507c3e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:13:21,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@759924d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,STOPPED} 2024-12-06T10:13:21,529 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:13:21,529 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T10:13:21,529 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1837903265-172.17.0.2-1733479972059 (Datanode Uuid 5ce381aa-be4d-4717-bb1c-4292513b0537) service to localhost/127.0.0.1:36887 2024-12-06T10:13:21,529 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:13:21,530 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data1/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:21,530 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data2/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:21,530 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:13:21,541 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:13:21,544 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:13:21,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:13:21,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:13:21,545 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:13:21,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53f2a5ca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:13:21,547 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69b93707{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:13:21,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ada0022{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/java.io.tmpdir/jetty-localhost-44315-hadoop-hdfs-3_4_1-tests_jar-_-any-18387076758234396429/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:21,673 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b8b5d80{HTTP/1.1, (http/1.1)}{localhost:44315} 2024-12-06T10:13:21,673 INFO [Time-limited test {}] server.Server(415): Started @202964ms 2024-12-06T10:13:21,674 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:13:21,692 WARN [ResponseProcessor for block BP-1837903265-172.17.0.2-1733479972059:blk_1073741839_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1837903265-172.17.0.2-1733479972059:blk_1073741839_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:21,693 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-916040266_22 at /127.0.0.1:38764 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:43741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38764 dst: /127.0.0.1:43741 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:13:21,695 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2be51175{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:21,696 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@545a7404{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:13:21,696 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:13:21,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c1b4427{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:13:21,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42c52bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,STOPPED} 2024-12-06T10:13:21,699 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:13:21,699 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1837903265-172.17.0.2-1733479972059 (Datanode Uuid 1649f975-9c26-4aac-a105-dd32c7c565d5) service to localhost/127.0.0.1:36887 2024-12-06T10:13:21,699 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T10:13:21,699 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:13:21,699 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data3/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:21,700 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data4/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:21,701 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:13:21,713 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:13:21,718 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:13:21,721 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:13:21,721 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:13:21,721 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:13:21,724 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1cf6748f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:13:21,724 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47350e54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:13:21,773 WARN [Thread-1163 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T10:13:21,775 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa64e4f106ccd511d with lease ID 0x3a716a104bf932b1: from storage DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a node DatanodeRegistration(127.0.0.1:44949, datanodeUuid=5ce381aa-be4d-4717-bb1c-4292513b0537, infoPort=39325, infoSecurePort=0, ipcPort=37781, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:13:21,776 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa64e4f106ccd511d with lease ID 0x3a716a104bf932b1: from storage DS-fbe484ac-2101-4132-baf5-b9b1360e2bb2 node DatanodeRegistration(127.0.0.1:44949, datanodeUuid=5ce381aa-be4d-4717-bb1c-4292513b0537, infoPort=39325, infoSecurePort=0, ipcPort=37781, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:13:21,844 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4934db34{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/java.io.tmpdir/jetty-localhost-34745-hadoop-hdfs-3_4_1-tests_jar-_-any-1067219186796168886/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:21,845 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@129adbbb{HTTP/1.1, (http/1.1)}{localhost:34745} 2024-12-06T10:13:21,845 INFO [Time-limited test {}] server.Server(415): Started @203136ms 2024-12-06T10:13:21,846 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-06T10:13:21,927 WARN [Thread-1194 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T10:13:21,929 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe6b624e03b1cdde0 with lease ID 0x3a716a104bf932b2: from storage DS-ce785a61-022d-4529-b138-17b96cbc715b node DatanodeRegistration(127.0.0.1:43037, datanodeUuid=1649f975-9c26-4aac-a105-dd32c7c565d5, infoPort=38209, infoSecurePort=0, ipcPort=37047, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:13:21,930 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe6b624e03b1cdde0 with lease ID 0x3a716a104bf932b2: from storage DS-b6537024-efe5-41dd-a6b6-da932ff525f2 node DatanodeRegistration(127.0.0.1:43037, datanodeUuid=1649f975-9c26-4aac-a105-dd32c7c565d5, infoPort=38209, infoSecurePort=0, ipcPort=37047, storageInfo=lv=-57;cid=testClusterID;nsid=1573747357;c=1733479972059), blocks: 8, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T10:13:22,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:13:22,717 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T10:13:22,866 INFO [Time-limited test {}] wal.TestLogRolling(366): Data Nodes restarted 2024-12-06T10:13:22,868 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-06T10:13:22,869 WARN [RS:0;552d6a33fa09:44863.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=8, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43741,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:22,869 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C44863%2C1733479972787:(num 1733479987478) roll requested 2024-12-06T10:13:22,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44863 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43741,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:13:22,870 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C44863%2C1733479972787.1733480002869 2024-12-06T10:13:22,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44863 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:44224 deadline: 1733480012869, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL 2024-12-06T10:13:22,875 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 newFile=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480002869 2024-12-06T10:13:22,875 WARN [regionserver/552d6a33fa09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL 2024-12-06T10:13:22,875 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480002869 2024-12-06T10:13:22,875 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39325:39325),(127.0.0.1/127.0.0.1:38209:38209)] 2024-12-06T10:13:22,875 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43741,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:22,875 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 is not closed yet, will try archiving it next time 2024-12-06T10:13:22,876 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43741,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:22,876 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 2024-12-06T10:13:22,876 WARN [IPC Server handler 3 on default port 36887 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1020 2024-12-06T10:13:22,876 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 after 0ms 2024-12-06T10:13:22,935 WARN [master/552d6a33fa09:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=95, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:22,936 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C32833%2C1733479972739:(num 1733479972872) roll requested 2024-12-06T10:13:22,936 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:22,936 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C32833%2C1733479972739.1733480002936 2024-12-06T10:13:22,936 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:22,941 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL 2024-12-06T10:13:22,941 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739/552d6a33fa09%2C32833%2C1733479972739.1733479972872 with entries=92, filesize=45.99 KB; new WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739/552d6a33fa09%2C32833%2C1733479972739.1733480002936 2024-12-06T10:13:22,942 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38209:38209),(127.0.0.1/127.0.0.1:39325:39325)] 2024-12-06T10:13:22,942 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739/552d6a33fa09%2C32833%2C1733479972739.1733479972872 is not closed yet, will try archiving it next time 2024-12-06T10:13:22,942 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:22,942 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-06T10:13:22,942 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739/552d6a33fa09%2C32833%2C1733479972739.1733479972872 2024-12-06T10:13:22,942 WARN [IPC Server handler 4 on default port 36887 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739/552d6a33fa09%2C32833%2C1733479972739.1733479972872 has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741830_1015 2024-12-06T10:13:22,943 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739/552d6a33fa09%2C32833%2C1733479972739.1733479972872 after 1ms 2024-12-06T10:13:23,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:13:24,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:25,020 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:13:25,021 INFO [RS-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42972, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:13:25,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:25,776 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741839_1020: GenerationStamp not matched, existing replica is blk_1073741839_1018 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T10:13:26,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:26,877 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 after 4001ms 2024-12-06T10:13:26,943 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739/552d6a33fa09%2C32833%2C1733479972739.1733479972872 after 4001ms 2024-12-06T10:13:27,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:28,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:28,932 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T10:13:29,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:30,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:31,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:32,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:33,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:34,048 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T10:13:34,048 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-06T10:13:34,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:34,954 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C44863%2C1733479972787.1733480014954 2024-12-06T10:13:34,960 DEBUG [Time-limited test {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480002869 newFile=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 2024-12-06T10:13:34,962 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480002869 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 2024-12-06T10:13:34,962 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39325:39325),(127.0.0.1/127.0.0.1:38209:38209)] 2024-12-06T10:13:34,962 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480002869 is not closed yet, will try archiving it next time 2024-12-06T10:13:34,962 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 2024-12-06T10:13:34,962 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 2024-12-06T10:13:34,962 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 after 0ms 2024-12-06T10:13:34,963 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 2024-12-06T10:13:34,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43037 is added to blk_1073741840_1021 (size=1264) 2024-12-06T10:13:34,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44949 is added to blk_1073741840_1021 (size=1264) 2024-12-06T10:13:34,970 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733479973938/Put/vlen=162/seqid=0] 2024-12-06T10:13:34,970 DEBUG 
[Time-limited test {}] wal.TestLogRolling(389): #4: [default/info:d/1733479974000/Put/vlen=9/seqid=0] 2024-12-06T10:13:34,970 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #5: [hbase/info:d/1733479974022/Put/vlen=7/seqid=0] 2024-12-06T10:13:34,970 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733479974473/Put/vlen=218/seqid=0] 2024-12-06T10:13:34,970 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [row1002/info:/1733479984132/Put/vlen=1045/seqid=0] 2024-12-06T10:13:34,970 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479973190 2024-12-06T10:13:34,971 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 2024-12-06T10:13:34,971 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 2024-12-06T10:13:34,971 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 after 0ms 2024-12-06T10:13:34,971 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 2024-12-06T10:13:34,975 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #6: [row1003/info:/1733479997518/Put/vlen=1045/seqid=0] 2024-12-06T10:13:34,975 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #7: [row1004/info:/1733479999522/Put/vlen=1045/seqid=0] 2024-12-06T10:13:34,975 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733479987478 2024-12-06T10:13:34,975 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480002869 2024-12-06T10:13:34,975 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480002869 2024-12-06T10:13:34,975 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480002869 after 0ms 2024-12-06T10:13:34,975 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480002869 2024-12-06T10:13:34,979 DEBUG [Time-limited test {}] wal.TestLogRolling(389): 
#9: [row1005/info:/1733480012953/Put/vlen=1045/seqid=0] 2024-12-06T10:13:34,979 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 2024-12-06T10:13:34,979 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 2024-12-06T10:13:34,979 WARN [IPC Server handler 1 on default port 36887 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741842_1025 2024-12-06T10:13:34,980 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 after 1ms 2024-12-06T10:13:35,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:13:35,937 WARN [ResponseProcessor for block BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:35,937 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1780657933_22 at /127.0.0.1:48752 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:44949:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48752 dst: /127.0.0.1:44949 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44949 remote=/127.0.0.1:48752]. Total timeout mills is 60000, 59022 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T10:13:35,937 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1780657933_22 at /127.0.0.1:33006 [Receiving block BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:43037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33006 dst: /127.0.0.1:43037 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:13:35,938 WARN [DataStreamer for file /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 block BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44949,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK], DatanodeInfoWithStorage[127.0.0.1:43037,DS-ce785a61-022d-4529-b138-17b96cbc715b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44949,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]) is bad. 
2024-12-06T10:13:35,942 WARN [DataStreamer for file /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 block BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:13:35,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44949 is added to blk_1073741842_1026 (size=85) 2024-12-06T10:13:36,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:37,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:38,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:13:38,980 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 after 4001ms 2024-12-06T10:13:38,981 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 2024-12-06T10:13:38,985 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 2024-12-06T10:13:38,985 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 2dc4d1dea09061f7ad95dc80de94e4aa 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-06T10:13:38,985 WARN [RS:0;552d6a33fa09:44863.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=11, requesting roll of WAL org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:38,986 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C44863%2C1733479972787:(num 1733480014954) roll requested 2024-12-06T10:13:38,986 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 2dc4d1dea09061f7ad95dc80de94e4aa: 2024-12-06T10:13:38,986 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C44863%2C1733479972787.1733480018986 2024-12-06T10:13:38,986 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:38,987 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.42 KB 2024-12-06T10:13:38,987 WARN [RS_OPEN_META-regionserver/552d6a33fa09:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:38,987 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-06T10:13:38,987 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:38,988 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 4c3725ae98335711a0d217b13705573b 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T10:13:38,988 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 4c3725ae98335711a0d217b13705573b: 2024-12-06T10:13:38,988 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:13:38,991 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T10:13:38,991 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T10:13:38,991 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x040e5b4c to 127.0.0.1:54272 2024-12-06T10:13:38,991 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:13:38,991 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T10:13:38,991 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=717145032, stopped=false 2024-12-06T10:13:38,991 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=552d6a33fa09,32833,1733479972739 2024-12-06T10:13:38,992 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 newFile=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480018986 2024-12-06T10:13:38,992 WARN [regionserver/552d6a33fa09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=11, requesting roll of WAL 2024-12-06T10:13:38,992 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480018986 2024-12-06T10:13:38,992 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39325:39325),(127.0.0.1/127.0.0.1:38209:38209)] 2024-12-06T10:13:38,992 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 is not closed yet, will try archiving it next time 2024-12-06T10:13:38,992 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 552d6a33fa09%2C44863%2C1733479972787.meta:.meta(num 1733479973545) roll requested 2024-12-06T10:13:38,992 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:38,992 INFO [regionserver/552d6a33fa09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C44863%2C1733479972787.meta.1733480018992.meta 2024-12-06T10:13:38,992 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1837903265-172.17.0.2-1733479972059:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:13:38,993 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 2024-12-06T10:13:38,993 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 after 0ms 2024-12-06T10:13:38,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:13:38,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:38,994 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T10:13:38,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:13:38,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:38,994 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:13:38,994 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.1733480014954 to hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/oldWALs/552d6a33fa09%2C44863%2C1733479972787.1733480014954 2024-12-06T10:13:38,994 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,44863,1733479972787' ***** 2024-12-06T10:13:38,994 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T10:13:38,994 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T10:13:38,995 INFO [RS:0;552d6a33fa09:44863 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T10:13:38,995 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T10:13:38,995 INFO [RS:0;552d6a33fa09:44863 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-06T10:13:38,995 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:13:38,995 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(3579): Received CLOSE for 2dc4d1dea09061f7ad95dc80de94e4aa 2024-12-06T10:13:38,995 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:13:38,996 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(3579): Received CLOSE for 4c3725ae98335711a0d217b13705573b 2024-12-06T10:13:38,996 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,44863,1733479972787 2024-12-06T10:13:38,996 DEBUG [RS:0;552d6a33fa09:44863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:13:38,996 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T10:13:38,996 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T10:13:38,996 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T10:13:38,996 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T10:13:38,996 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-06T10:13:38,996 DEBUG [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1603): Online Regions={2dc4d1dea09061f7ad95dc80de94e4aa=TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa., 1588230740=hbase:meta,,1.1588230740, 4c3725ae98335711a0d217b13705573b=hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b.} 2024-12-06T10:13:38,996 DEBUG [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2dc4d1dea09061f7ad95dc80de94e4aa, 4c3725ae98335711a0d217b13705573b 2024-12-06T10:13:38,996 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:13:38,996 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:13:38,996 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:13:38,996 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 2dc4d1dea09061f7ad95dc80de94e4aa, disabling compactions & flushes 2024-12-06T10:13:38,997 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:13:38,997 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 
2024-12-06T10:13:38,997 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:13:38,997 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 2024-12-06T10:13:38,997 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. after waiting 0 ms 2024-12-06T10:13:38,997 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 2024-12-06T10:13:38,997 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.89 KB 2024-12-06T10:13:38,997 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 2dc4d1dea09061f7ad95dc80de94e4aa 1/1 column families, dataSize=4.20 KB heapSize=4.98 KB 2024-12-06T10:13:38,997 WARN [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-06T10:13:38,997 WARN [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-06T10:13:38,997 WARN [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-06T10:13:38,997 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:13:38,997 ERROR [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2808): ***** ABORTING region server 552d6a33fa09,44863,1733479972787: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:38,997 ERROR [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-06T10:13:38,998 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-06T10:13:38,999 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-06T10:13:38,999 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-06T10:13:38,999 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-06T10:13:38,999 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 643476480 }, "NonHeapMemoryUsage": { "committed": 169082880, "init": 7667712, "max": -1, "used": 167106872 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-12-06T10:13:39,001 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32833 {}] master.MasterRpcServices(626): 552d6a33fa09,44863,1733479972787 reported a fatal error: ***** ABORTING region server 552d6a33fa09,44863,1733479972787: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** Cause: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) 2024-12-06T10:13:39,002 WARN [regionserver/552d6a33fa09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-06T10:13:39,002 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.meta.1733479973545.meta with entries=11, filesize=3.66 KB; new WAL /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.meta.1733480018992.meta 2024-12-06T10:13:39,002 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39325:39325),(127.0.0.1/127.0.0.1:38209:38209)] 2024-12-06T10:13:39,002 DEBUG [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.meta.1733479973545.meta is not closed yet, will try archiving it next time 2024-12-06T10:13:39,002 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T10:13:39,002 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35875,DS-4e57a86a-3da1-43e2-adf5-c6d24b11132a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T10:13:39,002 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.meta.1733479973545.meta 2024-12-06T10:13:39,003 WARN [IPC Server handler 4 on default port 36887 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.meta.1733479973545.meta has not been closed. Lease recovery is in progress. RecoveryId = 1029 for block blk_1073741834_1016 2024-12-06T10:13:39,003 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.meta.1733479973545.meta after 1ms 2024-12-06T10:13:39,020 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/default/TestLogRolling-testLogRollOnPipelineRestart/2dc4d1dea09061f7ad95dc80de94e4aa/.tmp/info/2575055d238547ed9adacf5bb2e58177 is 1080, key is row1002/info:/1733479984132/Put/seqid=0 2024-12-06T10:13:39,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44949 is added to blk_1073741845_1030 (size=9270) 2024-12-06T10:13:39,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43037 is added to blk_1073741845_1030 (size=9270) 2024-12-06T10:13:39,026 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/default/TestLogRolling-testLogRollOnPipelineRestart/2dc4d1dea09061f7ad95dc80de94e4aa/.tmp/info/2575055d238547ed9adacf5bb2e58177 2024-12-06T10:13:39,033 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/default/TestLogRolling-testLogRollOnPipelineRestart/2dc4d1dea09061f7ad95dc80de94e4aa/.tmp/info/2575055d238547ed9adacf5bb2e58177 as hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/default/TestLogRolling-testLogRollOnPipelineRestart/2dc4d1dea09061f7ad95dc80de94e4aa/info/2575055d238547ed9adacf5bb2e58177 2024-12-06T10:13:39,038 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/default/TestLogRolling-testLogRollOnPipelineRestart/2dc4d1dea09061f7ad95dc80de94e4aa/info/2575055d238547ed9adacf5bb2e58177, entries=4, sequenceid=12, filesize=9.1 K 2024-12-06T10:13:39,039 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 2dc4d1dea09061f7ad95dc80de94e4aa in 42ms, sequenceid=12, compaction requested=false 
2024-12-06T10:13:39,043 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/data/default/TestLogRolling-testLogRollOnPipelineRestart/2dc4d1dea09061f7ad95dc80de94e4aa/recovered.edits/15.seqid, newMaxSeqId=15, maxSeqId=1 2024-12-06T10:13:39,044 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 2024-12-06T10:13:39,044 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 2dc4d1dea09061f7ad95dc80de94e4aa: 2024-12-06T10:13:39,044 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733479974120.2dc4d1dea09061f7ad95dc80de94e4aa. 2024-12-06T10:13:39,044 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4c3725ae98335711a0d217b13705573b, disabling compactions & flushes 2024-12-06T10:13:39,044 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:13:39,044 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:13:39,044 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. after waiting 0 ms 2024-12-06T10:13:39,044 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:13:39,045 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4c3725ae98335711a0d217b13705573b: 2024-12-06T10:13:39,045 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 
2024-12-06T10:13:39,046 INFO [regionserver/552d6a33fa09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:13:39,100 INFO [regionserver/552d6a33fa09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T10:13:39,100 INFO [regionserver/552d6a33fa09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T10:13:39,196 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T10:13:39,197 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(3579): Received CLOSE for 4c3725ae98335711a0d217b13705573b 2024-12-06T10:13:39,197 DEBUG [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4c3725ae98335711a0d217b13705573b 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4c3725ae98335711a0d217b13705573b, disabling compactions & flushes 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:13:39,197 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:13:39,197 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. after waiting 0 ms 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4c3725ae98335711a0d217b13705573b: 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. 
Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733479973587.4c3725ae98335711a0d217b13705573b. 2024-12-06T10:13:39,197 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:meta,,1.1588230740 2024-12-06T10:13:39,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:39,397 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing 2024-12-06T10:13:39,397 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,44863,1733479972787; all regions closed. 
2024-12-06T10:13:39,397 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787 2024-12-06T10:13:39,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43037 is added to blk_1073741844_1028 (size=93) 2024-12-06T10:13:39,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44949 is added to blk_1073741844_1028 (size=93) 2024-12-06T10:13:40,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:40,934 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1016: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T10:13:41,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:42,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:43,004 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787/552d6a33fa09%2C44863%2C1733479972787.meta.1733479973545.meta after 4002ms 2024-12-06T10:13:43,004 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/WALs/552d6a33fa09,44863,1733479972787 2024-12-06T10:13:43,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43037 is added to blk_1073741843_1027 (size=1162) 2024-12-06T10:13:43,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44949 is added to blk_1073741843_1027 (size=1162) 2024-12-06T10:13:43,007 DEBUG [RS:0;552d6a33fa09:44863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:13:43,007 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:13:43,008 INFO [RS:0;552d6a33fa09:44863 {}] hbase.ChoreService(370): Chore service for: regionserver/552d6a33fa09:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-06T10:13:43,008 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T10:13:43,008 INFO [RS:0;552d6a33fa09:44863 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44863 2024-12-06T10:13:43,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/552d6a33fa09,44863,1733479972787 2024-12-06T10:13:43,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:13:43,011 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [552d6a33fa09,44863,1733479972787] 2024-12-06T10:13:43,011 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 552d6a33fa09,44863,1733479972787; numProcessing=1 2024-12-06T10:13:43,013 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/552d6a33fa09,44863,1733479972787 already deleted, retry=false 2024-12-06T10:13:43,013 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 552d6a33fa09,44863,1733479972787 expired; onlineServers=0 2024-12-06T10:13:43,013 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,32833,1733479972739' ***** 2024-12-06T10:13:43,013 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T10:13:43,013 DEBUG [M:0;552d6a33fa09:32833 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36edb3d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:13:43,013 INFO [M:0;552d6a33fa09:32833 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,32833,1733479972739 2024-12-06T10:13:43,013 INFO [M:0;552d6a33fa09:32833 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,32833,1733479972739; all regions closed. 2024-12-06T10:13:43,013 DEBUG [M:0;552d6a33fa09:32833 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:13:43,013 DEBUG [M:0;552d6a33fa09:32833 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T10:13:43,014 DEBUG [M:0;552d6a33fa09:32833 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T10:13:43,014 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T10:13:43,014 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733479972936 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733479972936,5,FailOnTimeoutGroup] 2024-12-06T10:13:43,014 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733479972936 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733479972936,5,FailOnTimeoutGroup] 2024-12-06T10:13:43,014 INFO [M:0;552d6a33fa09:32833 {}] hbase.ChoreService(370): Chore service for: master/552d6a33fa09:0 had [] on shutdown 2024-12-06T10:13:43,014 DEBUG [M:0;552d6a33fa09:32833 {}] master.HMaster(1733): Stopping service threads 2024-12-06T10:13:43,014 INFO [M:0;552d6a33fa09:32833 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T10:13:43,014 INFO [M:0;552d6a33fa09:32833 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T10:13:43,014 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T10:13:43,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T10:13:43,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:43,015 DEBUG [M:0;552d6a33fa09:32833 {}] zookeeper.ZKUtil(347): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T10:13:43,015 WARN [M:0;552d6a33fa09:32833 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T10:13:43,015 INFO [M:0;552d6a33fa09:32833 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T10:13:43,015 INFO [M:0;552d6a33fa09:32833 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T10:13:43,015 DEBUG [M:0;552d6a33fa09:32833 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:13:43,015 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:13:43,015 INFO [M:0;552d6a33fa09:32833 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:13:43,016 DEBUG [M:0;552d6a33fa09:32833 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:13:43,016 DEBUG [M:0;552d6a33fa09:32833 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:13:43,016 DEBUG [M:0;552d6a33fa09:32833 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T10:13:43,016 INFO [M:0;552d6a33fa09:32833 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.09 KB heapSize=49.26 KB 2024-12-06T10:13:43,032 DEBUG [M:0;552d6a33fa09:32833 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b98ed9d9a5734088a0c9ac01651eb564 is 82, key is hbase:meta,,1/info:regioninfo/1733479973567/Put/seqid=0 2024-12-06T10:13:43,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43037 is added to blk_1073741846_1031 (size=5672) 2024-12-06T10:13:43,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44949 is added to blk_1073741846_1031 (size=5672) 2024-12-06T10:13:43,039 INFO [M:0;552d6a33fa09:32833 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b98ed9d9a5734088a0c9ac01651eb564 2024-12-06T10:13:43,060 DEBUG [M:0;552d6a33fa09:32833 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7ea808e27e7c4883bfeee3c2b658baef is 778, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733479974489/Put/seqid=0 2024-12-06T10:13:43,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43037 is added to blk_1073741847_1032 (size=7469) 2024-12-06T10:13:43,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44949 is added to blk_1073741847_1032 (size=7469) 2024-12-06T10:13:43,066 INFO [M:0;552d6a33fa09:32833 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.49 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7ea808e27e7c4883bfeee3c2b658baef 2024-12-06T10:13:43,093 DEBUG [M:0;552d6a33fa09:32833 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/be2e39b543cd45d98fa41e21a7cc57eb is 69, key is 552d6a33fa09,44863,1733479972787/rs:state/1733479973028/Put/seqid=0 2024-12-06T10:13:43,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43037 is added to blk_1073741848_1033 (size=5156) 2024-12-06T10:13:43,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44949 is added to blk_1073741848_1033 (size=5156) 2024-12-06T10:13:43,100 INFO [M:0;552d6a33fa09:32833 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/be2e39b543cd45d98fa41e21a7cc57eb 2024-12-06T10:13:43,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:13:43,112 INFO [RS:0;552d6a33fa09:44863 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,44863,1733479972787; zookeeper connection closed. 2024-12-06T10:13:43,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44863-0x10066d0f8560001, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:13:43,112 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@434e9406 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@434e9406 2024-12-06T10:13:43,112 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T10:13:43,121 DEBUG [M:0;552d6a33fa09:32833 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/49585793be00490084a39bbbfd0a567d is 52, key is load_balancer_on/state:d/1733479974114/Put/seqid=0 2024-12-06T10:13:43,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44949 is added to blk_1073741849_1034 (size=5056) 2024-12-06T10:13:43,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43037 is added to blk_1073741849_1034 (size=5056) 2024-12-06T10:13:43,126 INFO [M:0;552d6a33fa09:32833 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/49585793be00490084a39bbbfd0a567d 2024-12-06T10:13:43,132 DEBUG [M:0;552d6a33fa09:32833 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b98ed9d9a5734088a0c9ac01651eb564 as hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b98ed9d9a5734088a0c9ac01651eb564 2024-12-06T10:13:43,137 INFO [M:0;552d6a33fa09:32833 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b98ed9d9a5734088a0c9ac01651eb564, entries=8, sequenceid=96, filesize=5.5 K 2024-12-06T10:13:43,138 DEBUG [M:0;552d6a33fa09:32833 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7ea808e27e7c4883bfeee3c2b658baef as hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7ea808e27e7c4883bfeee3c2b658baef 2024-12-06T10:13:43,144 INFO [M:0;552d6a33fa09:32833 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7ea808e27e7c4883bfeee3c2b658baef, 
entries=11, sequenceid=96, filesize=7.3 K 2024-12-06T10:13:43,145 DEBUG [M:0;552d6a33fa09:32833 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/be2e39b543cd45d98fa41e21a7cc57eb as hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/be2e39b543cd45d98fa41e21a7cc57eb 2024-12-06T10:13:43,150 INFO [M:0;552d6a33fa09:32833 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/be2e39b543cd45d98fa41e21a7cc57eb, entries=1, sequenceid=96, filesize=5.0 K 2024-12-06T10:13:43,151 DEBUG [M:0;552d6a33fa09:32833 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/49585793be00490084a39bbbfd0a567d as hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/49585793be00490084a39bbbfd0a567d 2024-12-06T10:13:43,156 INFO [M:0;552d6a33fa09:32833 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36887/user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/49585793be00490084a39bbbfd0a567d, entries=1, sequenceid=96, filesize=4.9 K 2024-12-06T10:13:43,157 INFO [M:0;552d6a33fa09:32833 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.09 KB/41052, heapSize ~49.20 KB/50376, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=96, compaction requested=false 2024-12-06T10:13:43,159 INFO [M:0;552d6a33fa09:32833 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:13:43,159 DEBUG [M:0;552d6a33fa09:32833 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:13:43,159 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/3aaaed90-9207-580b-48d6-f465b7e67dbb/MasterData/WALs/552d6a33fa09,32833,1733479972739 2024-12-06T10:13:43,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44949 is added to blk_1073741841_1023 (size=757) 2024-12-06T10:13:43,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43037 is added to blk_1073741841_1023 (size=757) 2024-12-06T10:13:43,164 INFO [M:0;552d6a33fa09:32833 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-06T10:13:43,164 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T10:13:43,164 INFO [M:0;552d6a33fa09:32833 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:32833 2024-12-06T10:13:43,166 DEBUG [M:0;552d6a33fa09:32833 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/552d6a33fa09,32833,1733479972739 already deleted, retry=false 2024-12-06T10:13:43,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:13:43,267 INFO [M:0;552d6a33fa09:32833 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,32833,1733479972739; zookeeper connection closed. 2024-12-06T10:13:43,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32833-0x10066d0f8560000, quorum=127.0.0.1:54272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:13:43,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4934db34{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:43,270 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@129adbbb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:13:43,270 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:13:43,271 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47350e54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:13:43,271 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1cf6748f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,STOPPED} 2024-12-06T10:13:43,273 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:13:43,273 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:13:43,273 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1837903265-172.17.0.2-1733479972059 (Datanode Uuid 1649f975-9c26-4aac-a105-dd32c7c565d5) service to localhost/127.0.0.1:36887 2024-12-06T10:13:43,273 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:13:43,274 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data3/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:43,274 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data4/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:43,275 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:13:43,285 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ada0022{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:43,285 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b8b5d80{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:13:43,286 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:13:43,286 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69b93707{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:13:43,286 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53f2a5ca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,STOPPED} 2024-12-06T10:13:43,290 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:13:43,290 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:13:43,290 WARN [BP-1837903265-172.17.0.2-1733479972059 heartbeating to localhost/127.0.0.1:36887 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1837903265-172.17.0.2-1733479972059 (Datanode Uuid 5ce381aa-be4d-4717-bb1c-4292513b0537) service to localhost/127.0.0.1:36887 2024-12-06T10:13:43,290 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:13:43,291 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data1/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:43,291 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/cluster_2bd6cdbc-0dcc-090f-135c-48ede117300e/dfs/data/data2/current/BP-1837903265-172.17.0.2-1733479972059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:13:43,292 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:13:43,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b6d77e9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:13:43,299 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4af4f66f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:13:43,299 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:13:43,299 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75e986b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:13:43,299 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a7064e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir/,STOPPED} 2024-12-06T10:13:43,305 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T10:13:43,322 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-06T10:13:43,330 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=100 (was 86) Potentially hanging thread: nioEventLoopGroup-26-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:36887 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36887 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-28-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36887 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:36887 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:36887 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:36887 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36887 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36887 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=444 (was 426) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=153 (was 222), ProcessCount=11 (was 11), AvailableMemoryMB=6674 (was 6774) 2024-12-06T10:13:43,338 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=100, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=153, ProcessCount=11, AvailableMemoryMB=6674 2024-12-06T10:13:43,338 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T10:13:43,338 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.log.dir so I do NOT create it in target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53 2024-12-06T10:13:43,338 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/ecf6356a-df52-b77a-a2ad-3bf990f70fc2/hadoop.tmp.dir so I do NOT create it in target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53 2024-12-06T10:13:43,338 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f, deleteOnExit=true 2024-12-06T10:13:43,338 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T10:13:43,338 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/test.cache.data in system properties and HBase conf 2024-12-06T10:13:43,338 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/hadoop.log.dir in system properties and HBase conf 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting 
mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T10:13:43,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:13:43,339 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:13:43,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:13:43,340 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T10:13:43,340 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/nfs.dump.dir in system properties and HBase conf 2024-12-06T10:13:43,340 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/java.io.tmpdir in system properties and HBase conf 2024-12-06T10:13:43,340 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:13:43,340 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T10:13:43,340 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T10:13:43,353 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:13:43,423 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:13:43,427 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:13:43,428 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:13:43,428 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:13:43,429 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:13:43,429 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:13:43,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15bb8b05{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:13:43,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d73ede7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:13:43,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@20b89acb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/java.io.tmpdir/jetty-localhost-34061-hadoop-hdfs-3_4_1-tests_jar-_-any-16194522117524788275/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:13:43,546 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f9c06d4{HTTP/1.1, (http/1.1)}{localhost:34061} 2024-12-06T10:13:43,546 INFO [Time-limited test {}] server.Server(415): Started @224838ms 2024-12-06T10:13:43,559 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:13:43,633 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:13:43,636 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:13:43,636 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:13:43,636 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:13:43,636 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:13:43,637 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@105e54da{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:13:43,637 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47065cf1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:13:43,751 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6fb68f59{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/java.io.tmpdir/jetty-localhost-39579-hadoop-hdfs-3_4_1-tests_jar-_-any-11689701826731731114/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:43,751 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a1e42bd{HTTP/1.1, (http/1.1)}{localhost:39579} 2024-12-06T10:13:43,751 INFO [Time-limited test {}] server.Server(415): Started @225043ms 2024-12-06T10:13:43,752 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:13:43,791 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:13:43,800 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:13:43,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:13:43,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:13:43,801 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:13:43,804 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4448dbfe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:13:43,804 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4249af65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:13:43,850 WARN [Thread-1364 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f/dfs/data/data1/current/BP-32812406-172.17.0.2-1733480023371/current, will proceed with Du for space computation calculation, 2024-12-06T10:13:43,851 WARN [Thread-1365 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f/dfs/data/data2/current/BP-32812406-172.17.0.2-1733480023371/current, will proceed with Du for space computation calculation, 2024-12-06T10:13:43,874 WARN [Thread-1343 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T10:13:43,878 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x88275b1b4e72c190 with lease ID 0xa2c5f54621dc0f9: Processing first storage report for DS-da33d38f-de04-484b-a9f8-1ebd9e031ae9 from datanode DatanodeRegistration(127.0.0.1:36003, datanodeUuid=d7d71029-613d-4956-b3af-08f8d4c8d19f, infoPort=34143, infoSecurePort=0, ipcPort=45077, storageInfo=lv=-57;cid=testClusterID;nsid=1402363145;c=1733480023371) 2024-12-06T10:13:43,878 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x88275b1b4e72c190 with lease ID 0xa2c5f54621dc0f9: from storage DS-da33d38f-de04-484b-a9f8-1ebd9e031ae9 node DatanodeRegistration(127.0.0.1:36003, datanodeUuid=d7d71029-613d-4956-b3af-08f8d4c8d19f, infoPort=34143, infoSecurePort=0, ipcPort=45077, storageInfo=lv=-57;cid=testClusterID;nsid=1402363145;c=1733480023371), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:13:43,878 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x88275b1b4e72c190 with lease ID 0xa2c5f54621dc0f9: Processing first storage report for DS-bc0db704-057f-4326-bfd6-66fe621d90de from datanode DatanodeRegistration(127.0.0.1:36003, datanodeUuid=d7d71029-613d-4956-b3af-08f8d4c8d19f, infoPort=34143, infoSecurePort=0, ipcPort=45077, storageInfo=lv=-57;cid=testClusterID;nsid=1402363145;c=1733480023371) 2024-12-06T10:13:43,878 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x88275b1b4e72c190 with lease ID 0xa2c5f54621dc0f9: from storage DS-bc0db704-057f-4326-bfd6-66fe621d90de node DatanodeRegistration(127.0.0.1:36003, datanodeUuid=d7d71029-613d-4956-b3af-08f8d4c8d19f, infoPort=34143, infoSecurePort=0, ipcPort=45077, storageInfo=lv=-57;cid=testClusterID;nsid=1402363145;c=1733480023371), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:13:43,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a5f9711{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/java.io.tmpdir/jetty-localhost-33335-hadoop-hdfs-3_4_1-tests_jar-_-any-2377373554893246132/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:13:43,929 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13fdc1a1{HTTP/1.1, (http/1.1)}{localhost:33335} 2024-12-06T10:13:43,929 INFO [Time-limited test {}] server.Server(415): Started @225220ms 2024-12-06T10:13:43,930 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
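The sequence above, from the StartMiniClusterOption line through the mini-DFS Jetty endpoints and datanode block reports, is the output HBaseTestingUtility produces when a test asks for a minicluster. Below is a minimal sketch of a JUnit 4 test driving that same startup and shutdown; the class and test names are illustrative only (not from this run), while the utility and option-builder calls are the standard HBase 2.x test API.

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class MiniClusterSmokeTest {  // illustrative name, not taken from the log
      private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUp() throws Exception {
        // Mirrors the option reported above: 1 master, 1 region server, 2 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        TEST_UTIL.startMiniCluster(option);  // produces the DFS/Jetty/ZooKeeper startup lines seen here
      }

      @AfterClass
      public static void tearDown() throws Exception {
        TEST_UTIL.shutdownMiniCluster();     // shutdown is what the ResourceChecker thread report follows
      }

      @Test
      public void clusterIsUp() {
        // Minimal sanity check that the cluster object exists after startup.
        if (TEST_UTIL.getHBaseCluster() == null) {
          throw new AssertionError("minicluster did not start");
        }
      }
    }

The ResourceChecker "before:"/"after:" lines bracketing each test in this log come from exactly this kind of lifecycle: they snapshot thread and file-descriptor counts around startMiniCluster/shutdownMiniCluster and flag leftovers as "Potentially hanging thread".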
2024-12-06T10:13:44,012 WARN [Thread-1390 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f/dfs/data/data3/current/BP-32812406-172.17.0.2-1733480023371/current, will proceed with Du for space computation calculation, 2024-12-06T10:13:44,013 WARN [Thread-1391 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f/dfs/data/data4/current/BP-32812406-172.17.0.2-1733480023371/current, will proceed with Du for space computation calculation, 2024-12-06T10:13:44,037 WARN [Thread-1379 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T10:13:44,040 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc99f5cf2e4ac0ba6 with lease ID 0xa2c5f54621dc0fa: Processing first storage report for DS-61165e62-ec41-4923-9987-37060461cb5c from datanode DatanodeRegistration(127.0.0.1:40655, datanodeUuid=45dd6cc8-6660-43a7-b15e-8b93934fe17b, infoPort=42213, infoSecurePort=0, ipcPort=37509, storageInfo=lv=-57;cid=testClusterID;nsid=1402363145;c=1733480023371) 2024-12-06T10:13:44,040 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc99f5cf2e4ac0ba6 with lease ID 0xa2c5f54621dc0fa: from storage DS-61165e62-ec41-4923-9987-37060461cb5c node DatanodeRegistration(127.0.0.1:40655, datanodeUuid=45dd6cc8-6660-43a7-b15e-8b93934fe17b, infoPort=42213, infoSecurePort=0, ipcPort=37509, storageInfo=lv=-57;cid=testClusterID;nsid=1402363145;c=1733480023371), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:13:44,040 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc99f5cf2e4ac0ba6 with lease ID 0xa2c5f54621dc0fa: Processing first storage report for DS-4b442042-0860-40b2-9ccf-41c61b7da155 from datanode DatanodeRegistration(127.0.0.1:40655, datanodeUuid=45dd6cc8-6660-43a7-b15e-8b93934fe17b, infoPort=42213, infoSecurePort=0, ipcPort=37509, storageInfo=lv=-57;cid=testClusterID;nsid=1402363145;c=1733480023371) 2024-12-06T10:13:44,040 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc99f5cf2e4ac0ba6 with lease ID 0xa2c5f54621dc0fa: from storage DS-4b442042-0860-40b2-9ccf-41c61b7da155 node DatanodeRegistration(127.0.0.1:40655, datanodeUuid=45dd6cc8-6660-43a7-b15e-8b93934fe17b, infoPort=42213, infoSecurePort=0, ipcPort=37509, storageInfo=lv=-57;cid=testClusterID;nsid=1402363145;c=1733480023371), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:13:44,058 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53 2024-12-06T10:13:44,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,059 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,059 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,059 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,059 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,060 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,070 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f/zookeeper_0, clientPort=51385, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T10:13:44,071 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=51385 2024-12-06T10:13:44,071 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:13:44,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,073 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,073 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:13:44,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:13:44,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:13:44,087 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7 with version=8 2024-12-06T10:13:44,087 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/hbase-staging 2024-12-06T10:13:44,089 INFO [Time-limited test {}] client.ConnectionUtils(129): master/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:13:44,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:13:44,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:13:44,089 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:13:44,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:13:44,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:13:44,090 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:13:44,090 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:13:44,090 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34949 2024-12-06T10:13:44,091 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:13:44,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:13:44,096 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:34949 connecting to ZooKeeper ensemble=127.0.0.1:51385 2024-12-06T10:13:44,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:349490x0, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:13:44,107 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34949-0x10066d1c0e60000 connected 2024-12-06T10:13:44,130 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:13:44,131 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:13:44,131 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:13:44,132 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34949 2024-12-06T10:13:44,132 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34949 2024-12-06T10:13:44,136 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34949 2024-12-06T10:13:44,140 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34949 2024-12-06T10:13:44,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34949 2024-12-06T10:13:44,143 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7, hbase.cluster.distributed=false 2024-12-06T10:13:44,168 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:13:44,168 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:13:44,169 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:13:44,169 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:13:44,169 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:13:44,169 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:13:44,169 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:13:44,169 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:13:44,170 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42775 2024-12-06T10:13:44,170 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T10:13:44,171 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T10:13:44,171 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:13:44,173 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:13:44,176 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42775 connecting to ZooKeeper ensemble=127.0.0.1:51385 2024-12-06T10:13:44,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:427750x0, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:13:44,179 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:427750x0, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:13:44,179 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42775-0x10066d1c0e60001 connected 2024-12-06T10:13:44,179 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:13:44,180 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:13:44,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42775 2024-12-06T10:13:44,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42775 2024-12-06T10:13:44,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42775 2024-12-06T10:13:44,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42775 2024-12-06T10:13:44,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42775 2024-12-06T10:13:44,182 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/552d6a33fa09,34949,1733480024089 2024-12-06T10:13:44,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:13:44,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:13:44,184 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/552d6a33fa09,34949,1733480024089 2024-12-06T10:13:44,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:13:44,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,186 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:13:44,186 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/552d6a33fa09,34949,1733480024089 from backup master directory 2024-12-06T10:13:44,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:13:44,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/552d6a33fa09,34949,1733480024089 2024-12-06T10:13:44,188 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:13:44,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:13:44,188 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
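The ZooKeeper activity above (watchers set on /hbase/master, /hbase/running and /hbase/acl, then the backup-master znode being created and deleted during active-master election) can be inspected directly against the mini ensemble. The sketch below uses the plain ZooKeeper client API; the connect string 127.0.0.1:51385 is the client port printed earlier in this run and would differ on any other run.

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class InspectHBaseZNodes {
      public static void main(String[] args) throws Exception {
        // Connect string taken from this run's MiniZooKeeperCluster (clientPort=51385).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51385", 30000,
            (WatchedEvent event) -> System.out.println("event: " + event));

        // /hbase/master holds the active master's ServerName once election completes.
        Stat masterStat = zk.exists("/hbase/master", false);
        System.out.println("/hbase/master exists: " + (masterStat != null));

        // The backup-masters children mirror the "Adding backup master ZNode" lines above;
        // guard against the znode not existing yet on a fresh cluster.
        if (zk.exists("/hbase/backup-masters", false) != null) {
          List<String> backups = zk.getChildren("/hbase/backup-masters", false);
          System.out.println("backup masters: " + backups);
        }

        zk.close();
      }
    }

In this run the znode deletion at 10:13:44,186-188 is the elected master removing its own backup-master entry before registering as active, which is why both watchers see NodeDeleted followed by NodeChildrenChanged on /hbase/backup-masters.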
2024-12-06T10:13:44,188 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=552d6a33fa09,34949,1733480024089 2024-12-06T10:13:44,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:13:44,195 DEBUG [M:0;552d6a33fa09:34949 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;552d6a33fa09:34949 2024-12-06T10:13:44,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:13:44,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:13:44,201 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/hbase.id with ID: 4628c718-7dd8-452e-b533-deacf06db879 2024-12-06T10:13:44,212 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:13:44,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:13:44,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:13:44,223 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:13:44,224 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T10:13:44,224 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:13:44,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:13:44,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:13:44,232 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store 2024-12-06T10:13:44,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:13:44,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:13:44,240 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:13:44,240 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 
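The 'master:store' descriptor logged above (families info, proc, rs and state; ROW_INDEX_V1 encoding, ROWCOL bloom filter and an 8 KB block size on info) is ordinary HBase 2.x table-descriptor metadata, just for an internal region managed by MasterRegion rather than a user table. As a rough sketch of how an equivalent descriptor is assembled with the public client API, reproducing only the info family and leaving the other settings at their defaults:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        // Mirrors the 'info' family from the log: 3 versions, in-memory, ROWCOL bloom,
        // ROW_INDEX_V1 encoding, 8 KB blocks. The proc, rs and state families would be
        // added the same way with the values shown in the log.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            .build();
      }
    }

This is only meant to decode the descriptor dump; the actual master:store region is created internally by the master (as the MasterRegion lines above show) and is not something a client would create.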
2024-12-06T10:13:44,240 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:13:44,240 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:13:44,240 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:13:44,240 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:13:44,240 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:13:44,240 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:13:44,241 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/.initializing 2024-12-06T10:13:44,241 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/WALs/552d6a33fa09,34949,1733480024089 2024-12-06T10:13:44,243 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C34949%2C1733480024089, suffix=, logDir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/WALs/552d6a33fa09,34949,1733480024089, archiveDir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/oldWALs, maxLogs=10 2024-12-06T10:13:44,244 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C34949%2C1733480024089.1733480024244 2024-12-06T10:13:44,252 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/WALs/552d6a33fa09,34949,1733480024089/552d6a33fa09%2C34949%2C1733480024089.1733480024244 2024-12-06T10:13:44,252 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42213:42213),(127.0.0.1/127.0.0.1:34143:34143)] 2024-12-06T10:13:44,252 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:13:44,252 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:13:44,252 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:13:44,252 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 
1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:13:44,254 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:13:44,255 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T10:13:44,255 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:44,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:13:44,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:13:44,258 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T10:13:44,258 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:44,258 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:13:44,258 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:13:44,260 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T10:13:44,260 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:44,260 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:13:44,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:13:44,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T10:13:44,262 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:44,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:13:44,263 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:13:44,264 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:13:44,266 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T10:13:44,267 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:13:44,269 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:13:44,270 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=789957, jitterRate=0.004482567310333252}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T10:13:44,270 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:13:44,271 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T10:13:44,274 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bfe920e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:13:44,275 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T10:13:44,275 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T10:13:44,275 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T10:13:44,275 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
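Note on the flush lower bound logged above: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, the value is derived by dividing the region memstore flush size by the number of column families. The sketch below only reproduces that arithmetic with numbers taken from this log (flushSize=134217728 and the four families of master:store).

```java
// Arithmetic sketch only, using values taken from this log.
public final class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memStoreFlushSize = 134_217_728L; // "Injected flushSize=134217728" (128 MB)
    int columnFamilies = 4;                // info, proc, rs, state
    long lowerBound = memStoreFlushSize / columnFamilies;
    // Matches FlushLargeStoresPolicy{flushSizeLowerBound=33554432}, i.e. 32 MB.
    System.out.println(lowerBound);
  }
}
```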
2024-12-06T10:13:44,276 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T10:13:44,276 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-06T10:13:44,276 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T10:13:44,278 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T10:13:44,279 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T10:13:44,280 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T10:13:44,280 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T10:13:44,281 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T10:13:44,282 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T10:13:44,282 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T10:13:44,283 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T10:13:44,284 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T10:13:44,285 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T10:13:44,286 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T10:13:44,287 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T10:13:44,288 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T10:13:44,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-06T10:13:44,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T10:13:44,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,290 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=552d6a33fa09,34949,1733480024089, sessionid=0x10066d1c0e60000, setting cluster-up flag (Was=false) 2024-12-06T10:13:44,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,298 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T10:13:44,299 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,34949,1733480024089 2024-12-06T10:13:44,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,306 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T10:13:44,307 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,34949,1733480024089 2024-12-06T10:13:44,309 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T10:13:44,310 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T10:13:44,310 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, 
RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T10:13:44,310 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 552d6a33fa09,34949,1733480024089 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T10:13:44,310 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:13:44,310 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:13:44,310 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:13:44,310 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:13:44,310 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/552d6a33fa09:0, corePoolSize=10, maxPoolSize=10 2024-12-06T10:13:44,310 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,311 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:13:44,311 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,311 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733480054311 2024-12-06T10:13:44,311 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T10:13:44,312 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T10:13:44,312 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T10:13:44,312 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T10:13:44,312 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T10:13:44,312 INFO 
[master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T10:13:44,312 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,312 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:13:44,312 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T10:13:44,312 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T10:13:44,312 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T10:13:44,312 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T10:13:44,313 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T10:13:44,313 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T10:13:44,313 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:44,314 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480024313,5,FailOnTimeoutGroup] 2024-12-06T10:13:44,313 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:13:44,314 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480024314,5,FailOnTimeoutGroup] 2024-12-06T10:13:44,314 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,314 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T10:13:44,314 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,314 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:13:44,322 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T10:13:44,322 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7 2024-12-06T10:13:44,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:13:44,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:13:44,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:13:44,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:13:44,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:13:44,334 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:13:44,334 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:44,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:13:44,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:13:44,336 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:13:44,336 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:44,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:13:44,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:13:44,338 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-06T10:13:44,338 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T10:13:44,338 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-06T10:13:44,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740
2024-12-06T10:13:44,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740
2024-12-06T10:13:44,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-06T10:13:44,341 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-06T10:13:44,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-06T10:13:44,344 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-06T10:13:44,344 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803635, jitterRate=0.021875053644180298}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-06T10:13:44,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-06T10:13:44,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-06T10:13:44,344 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-06T10:13:44,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-06T10:13:44,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-06T10:13:44,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-06T10:13:44,344 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-06T10:13:44,345 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-06T10:13:44,346 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-06T10:13:44,346 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-06T10:13:44,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-06T10:13:44,347 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-06T10:13:44,348 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-06T10:13:44,393 DEBUG [RS:0;552d6a33fa09:42775 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;552d6a33fa09:42775
2024-12-06T10:13:44,395 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1008): ClusterId : 4628c718-7dd8-452e-b533-deacf06db879
2024-12-06T10:13:44,395 DEBUG [RS:0;552d6a33fa09:42775 {}]
procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T10:13:44,397 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T10:13:44,397 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T10:13:44,398 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T10:13:44,399 DEBUG [RS:0;552d6a33fa09:42775 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b998181, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:13:44,399 DEBUG [RS:0;552d6a33fa09:42775 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@744af1ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:13:44,399 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T10:13:44,399 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T10:13:44,399 DEBUG [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-06T10:13:44,400 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(3073): reportForDuty to master=552d6a33fa09,34949,1733480024089 with isa=552d6a33fa09/172.17.0.2:42775, startcode=1733480024167 2024-12-06T10:13:44,400 DEBUG [RS:0;552d6a33fa09:42775 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:13:44,402 INFO [RS-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41675, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:13:44,402 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34949 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 552d6a33fa09,42775,1733480024167 2024-12-06T10:13:44,402 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34949 {}] master.ServerManager(486): Registering regionserver=552d6a33fa09,42775,1733480024167 2024-12-06T10:13:44,404 DEBUG [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7 2024-12-06T10:13:44,404 DEBUG [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:43379 2024-12-06T10:13:44,404 DEBUG [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T10:13:44,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:13:44,406 DEBUG [RS:0;552d6a33fa09:42775 {}] zookeeper.ZKUtil(111): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on existing 
znode=/hbase/rs/552d6a33fa09,42775,1733480024167 2024-12-06T10:13:44,406 WARN [RS:0;552d6a33fa09:42775 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T10:13:44,406 INFO [RS:0;552d6a33fa09:42775 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:13:44,406 DEBUG [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167 2024-12-06T10:13:44,406 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [552d6a33fa09,42775,1733480024167] 2024-12-06T10:13:44,410 DEBUG [RS:0;552d6a33fa09:42775 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T10:13:44,410 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T10:13:44,412 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T10:13:44,413 INFO [RS:0;552d6a33fa09:42775 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T10:13:44,413 INFO [RS:0;552d6a33fa09:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,415 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T10:13:44,416 INFO [RS:0;552d6a33fa09:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:13:44,417 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:13:44,418 DEBUG [RS:0;552d6a33fa09:42775 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:13:44,422 INFO [RS:0;552d6a33fa09:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,422 INFO [RS:0;552d6a33fa09:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,422 INFO [RS:0;552d6a33fa09:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,422 INFO [RS:0;552d6a33fa09:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,422 INFO [RS:0;552d6a33fa09:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,42775,1733480024167-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
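Note: the "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" lines above are emitted when chores are registered with a ChoreService. Below is a minimal sketch of that pattern, assuming the internal ScheduledChore/ChoreService APIs named in these log messages; the chore name and period are made up.

```java
// Minimal sketch of registering a periodic chore; names and period are hypothetical.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch"); // thread-name prefix
    ScheduledChore demo = new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        // periodic work runs here every 1000 ms until the stopper is stopped
      }
    };
    service.scheduleChore(demo); // logged by ChoreService as "... is enabled."
    service.shutdown();
  }
}
```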
2024-12-06T10:13:44,445 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T10:13:44,445 INFO [RS:0;552d6a33fa09:42775 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,42775,1733480024167-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,468 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.Replication(204): 552d6a33fa09,42775,1733480024167 started 2024-12-06T10:13:44,468 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1767): Serving as 552d6a33fa09,42775,1733480024167, RpcServer on 552d6a33fa09/172.17.0.2:42775, sessionid=0x10066d1c0e60001 2024-12-06T10:13:44,468 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T10:13:44,468 DEBUG [RS:0;552d6a33fa09:42775 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 552d6a33fa09,42775,1733480024167 2024-12-06T10:13:44,468 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,42775,1733480024167' 2024-12-06T10:13:44,468 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T10:13:44,469 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T10:13:44,469 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T10:13:44,469 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T10:13:44,470 DEBUG [RS:0;552d6a33fa09:42775 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 552d6a33fa09,42775,1733480024167 2024-12-06T10:13:44,470 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,42775,1733480024167' 2024-12-06T10:13:44,470 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T10:13:44,470 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T10:13:44,470 DEBUG [RS:0;552d6a33fa09:42775 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T10:13:44,470 INFO [RS:0;552d6a33fa09:42775 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T10:13:44,470 INFO [RS:0;552d6a33fa09:42775 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T10:13:44,498 WARN [552d6a33fa09:34949 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
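Note: the balancer status dump earlier in this log and the "No servers available; cannot place 1 unassigned regions" warning above summarize the same cluster state a client can poll through Admin#getClusterMetrics. A hedged client-side sketch follows, reusing the ZooKeeper client port 51385 shown in this log; it is not part of the test itself.

```java
// Illustrative client-side sketch; the quorum/port values are taken from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class ClusterStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 51385);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("master: " + metrics.getMasterName());
      System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
      System.out.println("regions in transition: " + metrics.getRegionStatesInTransition().size());
    }
  }
}
```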
2024-12-06T10:13:44,573 INFO [RS:0;552d6a33fa09:42775 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C42775%2C1733480024167, suffix=, logDir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167, archiveDir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/oldWALs, maxLogs=32 2024-12-06T10:13:44,574 INFO [RS:0;552d6a33fa09:42775 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C42775%2C1733480024167.1733480024574 2024-12-06T10:13:44,574 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T10:13:44,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,583 INFO [RS:0;552d6a33fa09:42775 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480024574 2024-12-06T10:13:44,583 DEBUG [RS:0;552d6a33fa09:42775 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42213:42213),(127.0.0.1/127.0.0.1:34143:34143)] 2024-12-06T10:13:44,595 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,595 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,595 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,596 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,596 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:13:44,748 DEBUG [552d6a33fa09:34949 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T10:13:44,749 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=552d6a33fa09,42775,1733480024167 2024-12-06T10:13:44,750 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,42775,1733480024167, state=OPENING 2024-12-06T10:13:44,751 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T10:13:44,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:13:44,753 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=552d6a33fa09,42775,1733480024167}] 2024-12-06T10:13:44,753 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:13:44,753 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:13:44,906 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,42775,1733480024167 2024-12-06T10:13:44,906 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T10:13:44,908 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37896, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T10:13:44,912 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T10:13:44,912 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:13:44,913 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C42775%2C1733480024167.meta, suffix=.meta, logDir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167, 
archiveDir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/oldWALs, maxLogs=32 2024-12-06T10:13:44,914 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C42775%2C1733480024167.meta.1733480024914.meta 2024-12-06T10:13:44,919 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.meta.1733480024914.meta 2024-12-06T10:13:44,919 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34143:34143),(127.0.0.1/127.0.0.1:42213:42213)] 2024-12-06T10:13:44,919 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:13:44,920 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T10:13:44,920 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T10:13:44,920 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-06T10:13:44,920 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T10:13:44,920 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:13:44,920 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T10:13:44,920 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T10:13:44,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:13:44,922 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:13:44,922 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:44,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:13:44,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:13:44,924 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:13:44,924 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:44,924 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:13:44,924 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:13:44,925 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:13:44,925 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:44,925 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:13:44,926 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740 2024-12-06T10:13:44,927 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740 2024-12-06T10:13:44,928 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-06T10:13:44,929 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:13:44,930 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689196, jitterRate=-0.12364248931407928}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:13:44,930 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:13:44,931 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733480024906 2024-12-06T10:13:44,933 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T10:13:44,933 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T10:13:44,933 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,42775,1733480024167 2024-12-06T10:13:44,934 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,42775,1733480024167, state=OPEN 2024-12-06T10:13:44,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:13:44,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:13:44,938 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:13:44,938 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:13:44,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T10:13:44,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=552d6a33fa09,42775,1733480024167 in 185 msec 2024-12-06T10:13:44,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T10:13:44,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 594 msec 2024-12-06T10:13:44,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 634 msec 2024-12-06T10:13:44,944 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733480024944, completionTime=-1 2024-12-06T10:13:44,944 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T10:13:44,944 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T10:13:44,944 DEBUG [hconnection-0x427df64c-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:13:44,946 INFO [RS-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37908, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:13:44,947 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T10:13:44,947 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733480084947 2024-12-06T10:13:44,947 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733480144947 2024-12-06T10:13:44,947 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec 2024-12-06T10:13:44,952 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34949,1733480024089-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,952 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34949,1733480024089-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,952 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34949,1733480024089-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,952 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-552d6a33fa09:34949, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,952 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T10:13:44,953 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-06T10:13:44,953 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:13:44,954 DEBUG [master/552d6a33fa09:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T10:13:44,954 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T10:13:44,955 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:13:44,955 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:44,956 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:13:44,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:13:44,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:13:44,965 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6e1422b1a6f851beaaa131ba6b1e204d, NAME => 'hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7 2024-12-06T10:13:44,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:13:44,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:13:44,972 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:13:44,972 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 6e1422b1a6f851beaaa131ba6b1e204d, disabling compactions & flushes 2024-12-06T10:13:44,972 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 2024-12-06T10:13:44,972 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 2024-12-06T10:13:44,972 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. after waiting 0 ms 2024-12-06T10:13:44,972 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 2024-12-06T10:13:44,972 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 2024-12-06T10:13:44,972 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6e1422b1a6f851beaaa131ba6b1e204d: 2024-12-06T10:13:44,973 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:13:44,973 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733480024973"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480024973"}]},"ts":"1733480024973"} 2024-12-06T10:13:44,975 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T10:13:44,976 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:13:44,977 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480024976"}]},"ts":"1733480024976"} 2024-12-06T10:13:44,978 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T10:13:44,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=6e1422b1a6f851beaaa131ba6b1e204d, ASSIGN}] 2024-12-06T10:13:44,983 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=6e1422b1a6f851beaaa131ba6b1e204d, ASSIGN 2024-12-06T10:13:44,983 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=6e1422b1a6f851beaaa131ba6b1e204d, ASSIGN; state=OFFLINE, location=552d6a33fa09,42775,1733480024167; forceNewPlan=false, retain=false 2024-12-06T10:13:45,134 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=6e1422b1a6f851beaaa131ba6b1e204d, regionState=OPENING, regionLocation=552d6a33fa09,42775,1733480024167 2024-12-06T10:13:45,136 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 6e1422b1a6f851beaaa131ba6b1e204d, server=552d6a33fa09,42775,1733480024167}] 2024-12-06T10:13:45,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-06T10:13:45,289 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,42775,1733480024167 2024-12-06T10:13:45,292 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 2024-12-06T10:13:45,292 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 6e1422b1a6f851beaaa131ba6b1e204d, NAME => 'hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:13:45,293 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 6e1422b1a6f851beaaa131ba6b1e204d 2024-12-06T10:13:45,293 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:13:45,293 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 6e1422b1a6f851beaaa131ba6b1e204d 2024-12-06T10:13:45,293 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 6e1422b1a6f851beaaa131ba6b1e204d 2024-12-06T10:13:45,294 INFO [StoreOpener-6e1422b1a6f851beaaa131ba6b1e204d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6e1422b1a6f851beaaa131ba6b1e204d 2024-12-06T10:13:45,296 INFO [StoreOpener-6e1422b1a6f851beaaa131ba6b1e204d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e1422b1a6f851beaaa131ba6b1e204d columnFamilyName info 2024-12-06T10:13:45,296 DEBUG [StoreOpener-6e1422b1a6f851beaaa131ba6b1e204d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:45,296 INFO [StoreOpener-6e1422b1a6f851beaaa131ba6b1e204d-1 {}] regionserver.HStore(327): Store=6e1422b1a6f851beaaa131ba6b1e204d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:13:45,297 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/namespace/6e1422b1a6f851beaaa131ba6b1e204d 2024-12-06T10:13:45,298 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/namespace/6e1422b1a6f851beaaa131ba6b1e204d 2024-12-06T10:13:45,300 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 6e1422b1a6f851beaaa131ba6b1e204d 2024-12-06T10:13:45,302 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/namespace/6e1422b1a6f851beaaa131ba6b1e204d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:13:45,303 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 6e1422b1a6f851beaaa131ba6b1e204d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818246, jitterRate=0.04045423865318298}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:13:45,303 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 6e1422b1a6f851beaaa131ba6b1e204d: 2024-12-06T10:13:45,304 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d., pid=6, masterSystemTime=1733480025289 2024-12-06T10:13:45,306 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 2024-12-06T10:13:45,306 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 
2024-12-06T10:13:45,307 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=6e1422b1a6f851beaaa131ba6b1e204d, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,42775,1733480024167
2024-12-06T10:13:45,311 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-06T10:13:45,311 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 6e1422b1a6f851beaaa131ba6b1e204d, server=552d6a33fa09,42775,1733480024167 in 173 msec
2024-12-06T10:13:45,313 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-06T10:13:45,313 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=6e1422b1a6f851beaaa131ba6b1e204d, ASSIGN in 330 msec
2024-12-06T10:13:45,314 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-06T10:13:45,314 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480025314"}]},"ts":"1733480025314"}
2024-12-06T10:13:45,316 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-06T10:13:45,319 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-06T10:13:45,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 366 msec
2024-12-06T10:13:45,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-06T10:13:45,355 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-06T10:13:45,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-06T10:13:45,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-06T10:13:45,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-06T10:13:45,362 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-06T10:13:45,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-06T10:13:45,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 12 msec
2024-12-06T10:13:45,384 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-06T10:13:45,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-06T10:13:45,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 9 msec
2024-12-06T10:13:45,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-06T10:13:45,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-06T10:13:45,400 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.212sec
2024-12-06T10:13:45,400 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T10:13:45,400 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T10:13:45,400 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T10:13:45,400 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T10:13:45,400 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T10:13:45,400 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34949,1733480024089-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T10:13:45,401 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34949,1733480024089-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T10:13:45,402 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T10:13:45,403 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T10:13:45,403 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34949,1733480024089-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T10:13:45,484 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x20521892 to 127.0.0.1:51385 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50de41ae 2024-12-06T10:13:45,487 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6badb3a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:13:45,489 DEBUG [hconnection-0x1faba1da-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:13:45,491 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37910, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:13:45,492 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=552d6a33fa09,34949,1733480024089 2024-12-06T10:13:45,493 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:13:45,496 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T10:13:45,497 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:13:45,501 INFO [RS-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60162, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:13:45,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T10:13:45,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-06T10:13:45,502 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:13:45,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T10:13:45,504 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:13:45,504 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:45,504 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 9 2024-12-06T10:13:45,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:13:45,505 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:13:45,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741837_1013 (size=405) 2024-12-06T10:13:45,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741837_1013 (size=405) 2024-12-06T10:13:45,515 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9b9745a5d3e30df4c291865480dafb79, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7 2024-12-06T10:13:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741838_1014 (size=88) 2024-12-06T10:13:45,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added 
to blk_1073741838_1014 (size=88) 2024-12-06T10:13:45,521 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:13:45,521 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1681): Closing 9b9745a5d3e30df4c291865480dafb79, disabling compactions & flushes 2024-12-06T10:13:45,521 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:13:45,522 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:13:45,522 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. after waiting 0 ms 2024-12-06T10:13:45,522 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:13:45,522 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:13:45,522 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9b9745a5d3e30df4c291865480dafb79: 2024-12-06T10:13:45,523 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:13:45,523 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733480025523"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480025523"}]},"ts":"1733480025523"} 2024-12-06T10:13:45,525 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-06T10:13:45,526 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:13:45,526 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480025526"}]},"ts":"1733480025526"} 2024-12-06T10:13:45,527 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-06T10:13:45,531 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9b9745a5d3e30df4c291865480dafb79, ASSIGN}] 2024-12-06T10:13:45,532 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9b9745a5d3e30df4c291865480dafb79, ASSIGN 2024-12-06T10:13:45,533 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9b9745a5d3e30df4c291865480dafb79, ASSIGN; state=OFFLINE, location=552d6a33fa09,42775,1733480024167; forceNewPlan=false, retain=false 2024-12-06T10:13:45,684 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9b9745a5d3e30df4c291865480dafb79, regionState=OPENING, regionLocation=552d6a33fa09,42775,1733480024167 2024-12-06T10:13:45,686 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 9b9745a5d3e30df4c291865480dafb79, server=552d6a33fa09,42775,1733480024167}] 2024-12-06T10:13:45,838 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,42775,1733480024167 2024-12-06T10:13:45,842 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 
2024-12-06T10:13:45,842 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 9b9745a5d3e30df4c291865480dafb79, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:13:45,843 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 9b9745a5d3e30df4c291865480dafb79 2024-12-06T10:13:45,843 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:13:45,843 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 9b9745a5d3e30df4c291865480dafb79 2024-12-06T10:13:45,843 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 9b9745a5d3e30df4c291865480dafb79 2024-12-06T10:13:45,844 INFO [StoreOpener-9b9745a5d3e30df4c291865480dafb79-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9b9745a5d3e30df4c291865480dafb79 2024-12-06T10:13:45,846 INFO [StoreOpener-9b9745a5d3e30df4c291865480dafb79-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9b9745a5d3e30df4c291865480dafb79 columnFamilyName info 2024-12-06T10:13:45,846 DEBUG [StoreOpener-9b9745a5d3e30df4c291865480dafb79-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:13:45,847 INFO [StoreOpener-9b9745a5d3e30df4c291865480dafb79-1 {}] regionserver.HStore(327): Store=9b9745a5d3e30df4c291865480dafb79/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:13:45,847 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79 2024-12-06T10:13:45,848 DEBUG 
[RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79 2024-12-06T10:13:45,850 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 9b9745a5d3e30df4c291865480dafb79 2024-12-06T10:13:45,852 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:13:45,852 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 9b9745a5d3e30df4c291865480dafb79; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777809, jitterRate=-0.010964974761009216}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:13:45,853 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 9b9745a5d3e30df4c291865480dafb79: 2024-12-06T10:13:45,854 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79., pid=11, masterSystemTime=1733480025838 2024-12-06T10:13:45,855 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:13:45,855 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 
2024-12-06T10:13:45,856 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9b9745a5d3e30df4c291865480dafb79, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,42775,1733480024167 2024-12-06T10:13:45,860 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T10:13:45,860 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 9b9745a5d3e30df4c291865480dafb79, server=552d6a33fa09,42775,1733480024167 in 172 msec 2024-12-06T10:13:45,862 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T10:13:45,862 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9b9745a5d3e30df4c291865480dafb79, ASSIGN in 329 msec 2024-12-06T10:13:45,863 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:13:45,863 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480025863"}]},"ts":"1733480025863"} 2024-12-06T10:13:45,865 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-06T10:13:45,867 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:13:45,868 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 365 msec 2024-12-06T10:13:46,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
[The identical WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258) "Failed invocation" InvocationTargetException / "Filesystem closed" stack trace recurs verbatim at 2024-12-06T10:13:47,341, 10:13:48,342, 10:13:49,342 and 10:13:50,343.]
2024-12-06T10:13:50,422 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T10:13:50,423 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[The same FsDatasetImpl(779) metric-collection WARN recurs verbatim thirteen more times between 2024-12-06T10:13:50,423 and 10:13:50,447.]
2024-12-06T10:13:50,453 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
[The Close-WAL-Writer-0 "Failed invocation" stack trace recurs verbatim at 2024-12-06T10:13:51,344, 10:13:52,344, 10:13:53,345 and 10:13:54,346.]
2024-12-06T10:13:54,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta after 68046ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor238.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-06T10:13:55,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T10:13:55,196 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
[The Close-WAL-Writer-0 "Failed invocation" stack trace recurs verbatim at 2024-12-06T10:13:55,347.]
2024-12-06T10:13:55,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:13:55,507 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 9 completed 2024-12-06T10:13:55,509 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T10:13:55,509 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79.
2024-12-06T10:13:55,517 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush hbase:namespace 2024-12-06T10:13:55,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace 2024-12-06T10:13:55,523 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:13:55,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T10:13:55,524 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:13:55,525 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:13:55,683 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,42775,1733480024167 2024-12-06T10:13:55,684 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T10:13:55,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 
2024-12-06T10:13:55,685 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 6e1422b1a6f851beaaa131ba6b1e204d 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T10:13:55,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/namespace/6e1422b1a6f851beaaa131ba6b1e204d/.tmp/info/dff20f8622474aa48ec941c4fe99e7d7 is 45, key is default/info:d/1733480025367/Put/seqid=0 2024-12-06T10:13:55,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741839_1015 (size=5037) 2024-12-06T10:13:55,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741839_1015 (size=5037) 2024-12-06T10:13:55,708 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/namespace/6e1422b1a6f851beaaa131ba6b1e204d/.tmp/info/dff20f8622474aa48ec941c4fe99e7d7 2024-12-06T10:13:55,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/namespace/6e1422b1a6f851beaaa131ba6b1e204d/.tmp/info/dff20f8622474aa48ec941c4fe99e7d7 as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/namespace/6e1422b1a6f851beaaa131ba6b1e204d/info/dff20f8622474aa48ec941c4fe99e7d7 2024-12-06T10:13:55,722 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/namespace/6e1422b1a6f851beaaa131ba6b1e204d/info/dff20f8622474aa48ec941c4fe99e7d7, entries=2, sequenceid=6, filesize=4.9 K 2024-12-06T10:13:55,723 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 6e1422b1a6f851beaaa131ba6b1e204d in 38ms, sequenceid=6, compaction requested=false 2024-12-06T10:13:55,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 6e1422b1a6f851beaaa131ba6b1e204d: 2024-12-06T10:13:55,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 
2024-12-06T10:13:55,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-06T10:13:55,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-06T10:13:55,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-06T10:13:55,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 202 msec 2024-12-06T10:13:55,731 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace in 212 msec
[The Close-WAL-Writer-0 "Failed invocation" stack trace recurs verbatim at 2024-12-06T10:13:56,347, 10:13:57,348, 10:13:58,349, 10:13:59,349, 10:14:00,350, 10:14:01,351, 10:14:02,351, 10:14:03,352, 10:14:04,353 and 10:14:05,353.]
2024-12-06T10:14:05,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T10:14:05,525 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: hbase:namespace, procId: 12 completed 2024-12-06T10:14:05,532 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T10:14:05,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T10:14:05,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T10:14:05,534 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:14:05,535 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:14:05,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE;
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:14:05,688 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,42775,1733480024167 2024-12-06T10:14:05,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T10:14:05,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:14:05,689 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 9b9745a5d3e30df4c291865480dafb79 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T10:14:05,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/c930eba9be2f40368ba5bbda6c339303 is 1080, key is row0001/info:/1733480045529/Put/seqid=0 2024-12-06T10:14:05,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741840_1016 (size=6033) 2024-12-06T10:14:05,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741840_1016 (size=6033) 2024-12-06T10:14:05,713 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/c930eba9be2f40368ba5bbda6c339303 2024-12-06T10:14:05,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/c930eba9be2f40368ba5bbda6c339303 as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/c930eba9be2f40368ba5bbda6c339303 2024-12-06T10:14:05,725 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/c930eba9be2f40368ba5bbda6c339303, entries=1, sequenceid=5, filesize=5.9 K 2024-12-06T10:14:05,726 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
9b9745a5d3e30df4c291865480dafb79 in 37ms, sequenceid=5, compaction requested=false 2024-12-06T10:14:05,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 9b9745a5d3e30df4c291865480dafb79: 2024-12-06T10:14:05,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:14:05,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-06T10:14:05,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-06T10:14:05,730 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-06T10:14:05,730 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 193 msec 2024-12-06T10:14:05,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 198 msec 2024-12-06T10:14:06,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:07,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:08,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:09,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:14:10,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:11,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:12,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:13,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:14,058 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T10:14:14,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:15,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:14:15,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T10:14:15,536 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 14 completed 2024-12-06T10:14:15,541 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T10:14:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T10:14:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T10:14:15,543 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:14:15,544 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:14:15,544 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:14:15,696 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,42775,1733480024167 2024-12-06T10:14:15,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T10:14:15,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 
2024-12-06T10:14:15,697 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 9b9745a5d3e30df4c291865480dafb79 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T10:14:15,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/395793d0519646d9bebcceb1479418a4 is 1080, key is row0002/info:/1733480055537/Put/seqid=0 2024-12-06T10:14:15,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741841_1017 (size=6033) 2024-12-06T10:14:15,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741841_1017 (size=6033) 2024-12-06T10:14:15,707 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/395793d0519646d9bebcceb1479418a4 2024-12-06T10:14:15,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/395793d0519646d9bebcceb1479418a4 as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/395793d0519646d9bebcceb1479418a4 2024-12-06T10:14:15,718 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/395793d0519646d9bebcceb1479418a4, entries=1, sequenceid=9, filesize=5.9 K 2024-12-06T10:14:15,719 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9b9745a5d3e30df4c291865480dafb79 in 22ms, sequenceid=9, compaction requested=false 2024-12-06T10:14:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 9b9745a5d3e30df4c291865480dafb79: 2024-12-06T10:14:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 
2024-12-06T10:14:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-06T10:14:15,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-06T10:14:15,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-06T10:14:15,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-12-06T10:14:15,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-12-06T10:14:16,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:17,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:18,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:18,399 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:14:18,401 INFO [RS-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34644, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:14:19,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:20,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:21,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:22,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:23,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:24,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:14:25,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:25,436 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T10:14:25,436 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-06T10:14:25,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T10:14:25,544 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 16 completed 2024-12-06T10:14:25,547 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C42775%2C1733480024167.1733480065546 2024-12-06T10:14:25,554 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480024574 with entries=13, filesize=6.41 KB; new WAL /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480065546 2024-12-06T10:14:25,554 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42213:42213),(127.0.0.1/127.0.0.1:34143:34143)] 2024-12-06T10:14:25,554 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480024574 is not closed yet, will try archiving it next time 2024-12-06T10:14:25,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741833_1009 (size=6574) 2024-12-06T10:14:25,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741833_1009 (size=6574) 2024-12-06T10:14:25,558 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T10:14:25,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T10:14:25,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T10:14:25,560 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:14:25,561 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:14:25,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:14:25,713 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,42775,1733480024167 2024-12-06T10:14:25,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42775 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-06T10:14:25,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:14:25,714 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 9b9745a5d3e30df4c291865480dafb79 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T10:14:25,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/186120730c47430abaeeef33b47ab27f is 1080, key is row0003/info:/1733480065545/Put/seqid=0 2024-12-06T10:14:25,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741843_1019 (size=6033) 2024-12-06T10:14:25,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741843_1019 (size=6033) 2024-12-06T10:14:25,725 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/186120730c47430abaeeef33b47ab27f 2024-12-06T10:14:25,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/186120730c47430abaeeef33b47ab27f as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/186120730c47430abaeeef33b47ab27f 2024-12-06T10:14:25,737 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/186120730c47430abaeeef33b47ab27f, entries=1, sequenceid=13, filesize=5.9 K 2024-12-06T10:14:25,738 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9b9745a5d3e30df4c291865480dafb79 in 24ms, sequenceid=13, compaction requested=true 2024-12-06T10:14:25,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 9b9745a5d3e30df4c291865480dafb79: 2024-12-06T10:14:25,739 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:14:25,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-06T10:14:25,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-06T10:14:25,742 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-06T10:14:25,742 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-12-06T10:14:25,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-12-06T10:14:26,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:14:27,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:28,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:29,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:14:30,293 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 6e1422b1a6f851beaaa131ba6b1e204d, had cached 0 bytes from a total of 5037 2024-12-06T10:14:30,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:30,843 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 9b9745a5d3e30df4c291865480dafb79, had cached 0 bytes from a total of 18099 2024-12-06T10:14:31,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:32,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:14:33,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:34,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:35,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:14:35,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T10:14:35,562 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 18 completed 2024-12-06T10:14:35,562 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:14:35,564 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:14:35,564 DEBUG [Time-limited test {}] regionserver.HStore(1540): 9b9745a5d3e30df4c291865480dafb79/info is initiating minor compaction (all files) 2024-12-06T10:14:35,564 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T10:14:35,564 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:35,564 INFO [Time-limited test {}] regionserver.HRegion(2351): Starting compaction of 9b9745a5d3e30df4c291865480dafb79/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:14:35,564 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/c930eba9be2f40368ba5bbda6c339303, hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/395793d0519646d9bebcceb1479418a4, hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/186120730c47430abaeeef33b47ab27f] into tmpdir=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp, totalSize=17.7 K 2024-12-06T10:14:35,565 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting c930eba9be2f40368ba5bbda6c339303, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733480045529 2024-12-06T10:14:35,565 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 395793d0519646d9bebcceb1479418a4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733480055537 2024-12-06T10:14:35,565 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 186120730c47430abaeeef33b47ab27f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733480065545 2024-12-06T10:14:35,578 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 9b9745a5d3e30df4c291865480dafb79#info#compaction#29 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 
ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:14:35,579 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/bd5fe7b676a6496e92b802e6714061b3 is 1080, key is row0001/info:/1733480045529/Put/seqid=0 2024-12-06T10:14:35,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741844_1020 (size=8296) 2024-12-06T10:14:35,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741844_1020 (size=8296) 2024-12-06T10:14:35,591 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/bd5fe7b676a6496e92b802e6714061b3 as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/bd5fe7b676a6496e92b802e6714061b3 2024-12-06T10:14:35,597 INFO [Time-limited test {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9b9745a5d3e30df4c291865480dafb79/info of 9b9745a5d3e30df4c291865480dafb79 into bd5fe7b676a6496e92b802e6714061b3(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:14:35,597 DEBUG [Time-limited test {}] regionserver.HRegion(2381): Compaction status journal for 9b9745a5d3e30df4c291865480dafb79: 2024-12-06T10:14:35,599 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C42775%2C1733480024167.1733480075599 2024-12-06T10:14:35,610 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480065546 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480075599 2024-12-06T10:14:35,610 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34143:34143),(127.0.0.1/127.0.0.1:42213:42213)] 2024-12-06T10:14:35,610 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480065546 is not closed yet, will try archiving it next time 2024-12-06T10:14:35,611 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480024574 to hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/oldWALs/552d6a33fa09%2C42775%2C1733480024167.1733480024574 2024-12-06T10:14:35,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741842_1018 (size=2520) 2024-12-06T10:14:35,612 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741842_1018 (size=2520) 2024-12-06T10:14:35,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T10:14:35,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T10:14:35,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T10:14:35,616 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-06T10:14:35,617 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T10:14:35,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T10:14:35,769 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,42775,1733480024167 2024-12-06T10:14:35,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-06T10:14:35,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 
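Note: the flush request logged just above (Client=jenkins flush TestLogRolling-testCompactionRecordDoesntBlockRolling, stored as FlushTableProcedure pid=20 with subprocedure pid=21 and dispatched to the region server) is an ordinary admin-driven table flush; the entries that follow show the region flush itself completing. A minimal client-side sketch of issuing the same request through the public HBase Admin API follows. It is illustrative only: the connection configuration is assumed to be the default, and only the table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumed: hbase-site.xml on the classpath points at the cluster under test.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush waits for the master-side flush procedure to finish, matching the
      // "Checking to see if procedure is done pid=20" polling visible in this log.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}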
2024-12-06T10:14:35,770 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 9b9745a5d3e30df4c291865480dafb79 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T10:14:35,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/a8f09b300a384966a10fc07f3f13152e is 1080, key is row0000/info:/1733480075598/Put/seqid=0 2024-12-06T10:14:35,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741846_1022 (size=6033) 2024-12-06T10:14:35,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741846_1022 (size=6033) 2024-12-06T10:14:35,780 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/a8f09b300a384966a10fc07f3f13152e 2024-12-06T10:14:35,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/a8f09b300a384966a10fc07f3f13152e as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/a8f09b300a384966a10fc07f3f13152e 2024-12-06T10:14:35,791 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/a8f09b300a384966a10fc07f3f13152e, entries=1, sequenceid=18, filesize=5.9 K 2024-12-06T10:14:35,792 INFO [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9b9745a5d3e30df4c291865480dafb79 in 22ms, sequenceid=18, compaction requested=false 2024-12-06T10:14:35,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 9b9745a5d3e30df4c291865480dafb79: 2024-12-06T10:14:35,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 
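Note: the Close-WAL-Writer-0 WARNs that resume below (and recur throughout this section, roughly once per second) all report the same condition: lease recovery on the old meta WAL named in those entries keeps probing isFileClosed, and every probe fails with java.io.IOException: Filesystem closed because the DFSClient behind that path has already been shut down. The sketch below only illustrates the general shape of such a probe-and-retry loop; it is not the RecoverLeaseFSUtils implementation, and the file system handle, path, and retry bound are assumptions.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class IsFileClosedProbe {
  // Poll the NameNode until it reports the file closed, sleeping one second between
  // attempts and logging each failed probe, similar in shape to the WARN entries above.
  public static boolean waitUntilClosed(DistributedFileSystem dfs, Path wal, int maxAttempts)
      throws InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        if (dfs.isFileClosed(wal)) {
          return true;
        }
      } catch (IOException e) {
        // A closed DFSClient surfaces here as "Filesystem closed", as in this log.
        System.err.println("Failed invocation for " + wal + ": " + e);
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}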
2024-12-06T10:14:35,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-06T10:14:35,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-06T10:14:35,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-06T10:14:35,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-12-06T10:14:35,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-12-06T10:14:36,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:37,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:38,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:39,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:40,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:41,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:14:42,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:43,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:44,058 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T10:14:44,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:14:44,957 DEBUG [master/552d6a33fa09:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-06T10:14:44,957 DEBUG [master/552d6a33fa09:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 6e1422b1a6f851beaaa131ba6b1e204d changed from -1.0 to 0.0, refreshing cache 2024-12-06T10:14:45,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:14:45,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34949 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T10:14:45,617 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 20 completed 2024-12-06T10:14:45,619 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C42775%2C1733480024167.1733480085619 2024-12-06T10:14:45,626 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480075599 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480085619 2024-12-06T10:14:45,626 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34143:34143),(127.0.0.1/127.0.0.1:42213:42213)] 2024-12-06T10:14:45,626 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480075599 is not closed yet, will try archiving it next time 2024-12-06T10:14:45,626 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167/552d6a33fa09%2C42775%2C1733480024167.1733480065546 to hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/oldWALs/552d6a33fa09%2C42775%2C1733480024167.1733480065546 2024-12-06T10:14:45,626 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T10:14:45,626 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T10:14:45,627 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x20521892 to 127.0.0.1:51385 2024-12-06T10:14:45,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:14:45,627 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T10:14:45,627 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1426804301, stopped=false 2024-12-06T10:14:45,627 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=552d6a33fa09,34949,1733480024089 2024-12-06T10:14:45,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741845_1021 (size=2026) 2024-12-06T10:14:45,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741845_1021 (size=2026) 2024-12-06T10:14:45,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:14:45,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-06T10:14:45,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:14:45,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:45,629 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T10:14:45,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:14:45,629 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,42775,1733480024167' ***** 2024-12-06T10:14:45,629 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T10:14:45,629 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:14:45,630 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:14:45,631 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T10:14:45,631 INFO [RS:0;552d6a33fa09:42775 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T10:14:45,631 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T10:14:45,631 INFO [RS:0;552d6a33fa09:42775 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T10:14:45,631 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(3579): Received CLOSE for 9b9745a5d3e30df4c291865480dafb79 2024-12-06T10:14:45,631 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(3579): Received CLOSE for 6e1422b1a6f851beaaa131ba6b1e204d 2024-12-06T10:14:45,631 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,42775,1733480024167 2024-12-06T10:14:45,631 DEBUG [RS:0;552d6a33fa09:42775 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:14:45,631 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T10:14:45,631 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T10:14:45,631 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T10:14:45,631 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 9b9745a5d3e30df4c291865480dafb79, disabling compactions & flushes 2024-12-06T10:14:45,632 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T10:14:45,632 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 
2024-12-06T10:14:45,632 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:14:45,632 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. after waiting 0 ms 2024-12-06T10:14:45,632 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:14:45,632 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-06T10:14:45,632 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 9b9745a5d3e30df4c291865480dafb79 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T10:14:45,632 DEBUG [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1603): Online Regions={9b9745a5d3e30df4c291865480dafb79=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79., 1588230740=hbase:meta,,1.1588230740, 6e1422b1a6f851beaaa131ba6b1e204d=hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d.} 2024-12-06T10:14:45,632 DEBUG [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 6e1422b1a6f851beaaa131ba6b1e204d, 9b9745a5d3e30df4c291865480dafb79 2024-12-06T10:14:45,632 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:14:45,632 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:14:45,632 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:14:45,632 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:14:45,632 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:14:45,632 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=3.05 KB heapSize=5.55 KB 2024-12-06T10:14:45,636 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/5dc7dc290e0545ed8f80b98335a65e21 is 1080, key is row0001/info:/1733480085618/Put/seqid=0 2024-12-06T10:14:45,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741848_1024 (size=6033) 2024-12-06T10:14:45,642 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741848_1024 (size=6033) 2024-12-06T10:14:45,643 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/5dc7dc290e0545ed8f80b98335a65e21 2024-12-06T10:14:45,649 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/.tmp/info/5dc7dc290e0545ed8f80b98335a65e21 as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/5dc7dc290e0545ed8f80b98335a65e21 2024-12-06T10:14:45,651 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/.tmp/info/c1b0b4d1b89748d68ee00f8e96bad331 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79./info:regioninfo/1733480025856/Put/seqid=0 2024-12-06T10:14:45,655 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/5dc7dc290e0545ed8f80b98335a65e21, entries=1, sequenceid=22, filesize=5.9 K 2024-12-06T10:14:45,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741849_1025 (size=8430) 2024-12-06T10:14:45,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741849_1025 (size=8430) 2024-12-06T10:14:45,657 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9b9745a5d3e30df4c291865480dafb79 in 25ms, sequenceid=22, compaction requested=true 2024-12-06T10:14:45,657 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/.tmp/info/c1b0b4d1b89748d68ee00f8e96bad331 2024-12-06T10:14:45,657 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/c930eba9be2f40368ba5bbda6c339303, 
hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/395793d0519646d9bebcceb1479418a4, hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/186120730c47430abaeeef33b47ab27f] to archive 2024-12-06T10:14:45,658 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:14:45,660 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/c930eba9be2f40368ba5bbda6c339303 to hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/c930eba9be2f40368ba5bbda6c339303 2024-12-06T10:14:45,662 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/395793d0519646d9bebcceb1479418a4 to hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/395793d0519646d9bebcceb1479418a4 2024-12-06T10:14:45,663 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/186120730c47430abaeeef33b47ab27f to hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/info/186120730c47430abaeeef33b47ab27f 2024-12-06T10:14:45,676 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9b9745a5d3e30df4c291865480dafb79/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-06T10:14:45,677 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 
2024-12-06T10:14:45,677 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 9b9745a5d3e30df4c291865480dafb79: 2024-12-06T10:14:45,677 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733480025501.9b9745a5d3e30df4c291865480dafb79. 2024-12-06T10:14:45,677 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 6e1422b1a6f851beaaa131ba6b1e204d, disabling compactions & flushes 2024-12-06T10:14:45,677 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 2024-12-06T10:14:45,677 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 2024-12-06T10:14:45,677 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. after waiting 0 ms 2024-12-06T10:14:45,677 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 2024-12-06T10:14:45,677 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/.tmp/table/4699c8b449154b3e9cd9c139e93a23f2 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733480025863/Put/seqid=0 2024-12-06T10:14:45,681 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/namespace/6e1422b1a6f851beaaa131ba6b1e204d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T10:14:45,682 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 2024-12-06T10:14:45,682 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 6e1422b1a6f851beaaa131ba6b1e204d: 2024-12-06T10:14:45,682 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733480024953.6e1422b1a6f851beaaa131ba6b1e204d. 
2024-12-06T10:14:45,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741850_1026 (size=5532) 2024-12-06T10:14:45,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741850_1026 (size=5532) 2024-12-06T10:14:45,686 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=264 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/.tmp/table/4699c8b449154b3e9cd9c139e93a23f2 2024-12-06T10:14:45,691 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/.tmp/info/c1b0b4d1b89748d68ee00f8e96bad331 as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/info/c1b0b4d1b89748d68ee00f8e96bad331 2024-12-06T10:14:45,697 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/info/c1b0b4d1b89748d68ee00f8e96bad331, entries=20, sequenceid=14, filesize=8.2 K 2024-12-06T10:14:45,697 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/.tmp/table/4699c8b449154b3e9cd9c139e93a23f2 as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/table/4699c8b449154b3e9cd9c139e93a23f2 2024-12-06T10:14:45,702 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/table/4699c8b449154b3e9cd9c139e93a23f2, entries=4, sequenceid=14, filesize=5.4 K 2024-12-06T10:14:45,703 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~3.05 KB/3122, heapSize ~5.27 KB/5400, currentSize=0 B/0 for 1588230740 in 71ms, sequenceid=14, compaction requested=false 2024-12-06T10:14:45,707 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 2024-12-06T10:14:45,708 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T10:14:45,708 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T10:14:45,708 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:14:45,708 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 
{event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-06T10:14:45,832 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,42775,1733480024167; all regions closed.
2024-12-06T10:14:45,833 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167
2024-12-06T10:14:45,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741834_1010 (size=4570)
2024-12-06T10:14:45,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741834_1010 (size=4570)
2024-12-06T10:14:45,837 DEBUG [RS:0;552d6a33fa09:42775 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/oldWALs
2024-12-06T10:14:45,837 INFO [RS:0;552d6a33fa09:42775 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 552d6a33fa09%2C42775%2C1733480024167.meta:.meta(num 1733480024914)
2024-12-06T10:14:45,837 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/WALs/552d6a33fa09,42775,1733480024167
2024-12-06T10:14:45,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741847_1023 (size=1545)
2024-12-06T10:14:45,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741847_1023 (size=1545)
2024-12-06T10:14:45,842 DEBUG [RS:0;552d6a33fa09:42775 {}] wal.AbstractFSWAL(1071): Moved 2 WAL file(s) to /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/oldWALs
2024-12-06T10:14:45,842 INFO [RS:0;552d6a33fa09:42775 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 552d6a33fa09%2C42775%2C1733480024167:(num 1733480085619)
2024-12-06T10:14:45,842 DEBUG [RS:0;552d6a33fa09:42775 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T10:14:45,842 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.LeaseManager(133): Closed leases
2024-12-06T10:14:45,842 INFO [RS:0;552d6a33fa09:42775 {}] hbase.ChoreService(370): Chore service for: regionserver/552d6a33fa09:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-06T10:14:45,842 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-06T10:14:45,843 INFO [RS:0;552d6a33fa09:42775 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42775 2024-12-06T10:14:45,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/552d6a33fa09,42775,1733480024167 2024-12-06T10:14:45,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:14:45,846 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [552d6a33fa09,42775,1733480024167] 2024-12-06T10:14:45,846 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 552d6a33fa09,42775,1733480024167; numProcessing=1 2024-12-06T10:14:45,847 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/552d6a33fa09,42775,1733480024167 already deleted, retry=false 2024-12-06T10:14:45,847 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 552d6a33fa09,42775,1733480024167 expired; onlineServers=0 2024-12-06T10:14:45,847 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,34949,1733480024089' ***** 2024-12-06T10:14:45,847 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T10:14:45,847 DEBUG [M:0;552d6a33fa09:34949 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1425e432, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:14:45,847 INFO [M:0;552d6a33fa09:34949 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,34949,1733480024089 2024-12-06T10:14:45,847 INFO [M:0;552d6a33fa09:34949 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,34949,1733480024089; all regions closed. 2024-12-06T10:14:45,847 DEBUG [M:0;552d6a33fa09:34949 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:14:45,847 DEBUG [M:0;552d6a33fa09:34949 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T10:14:45,847 DEBUG [M:0;552d6a33fa09:34949 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T10:14:45,847 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T10:14:45,847 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480024313 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480024313,5,FailOnTimeoutGroup] 2024-12-06T10:14:45,847 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480024314 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480024314,5,FailOnTimeoutGroup] 2024-12-06T10:14:45,848 INFO [M:0;552d6a33fa09:34949 {}] hbase.ChoreService(370): Chore service for: master/552d6a33fa09:0 had [] on shutdown 2024-12-06T10:14:45,848 DEBUG [M:0;552d6a33fa09:34949 {}] master.HMaster(1733): Stopping service threads 2024-12-06T10:14:45,848 INFO [M:0;552d6a33fa09:34949 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T10:14:45,848 INFO [M:0;552d6a33fa09:34949 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T10:14:45,848 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T10:14:45,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T10:14:45,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:45,848 DEBUG [M:0;552d6a33fa09:34949 {}] zookeeper.ZKUtil(347): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T10:14:45,849 WARN [M:0;552d6a33fa09:34949 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T10:14:45,849 INFO [M:0;552d6a33fa09:34949 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T10:14:45,849 INFO [M:0;552d6a33fa09:34949 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T10:14:45,849 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:14:45,849 DEBUG [M:0;552d6a33fa09:34949 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:14:45,849 INFO [M:0;552d6a33fa09:34949 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:14:45,849 DEBUG [M:0;552d6a33fa09:34949 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:14:45,849 DEBUG [M:0;552d6a33fa09:34949 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:14:45,849 DEBUG [M:0;552d6a33fa09:34949 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T10:14:45,849 INFO [M:0;552d6a33fa09:34949 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=65.10 KB heapSize=81.72 KB 2024-12-06T10:14:45,865 DEBUG [M:0;552d6a33fa09:34949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc9d0d4865f240938e49e5b28af019c2 is 82, key is hbase:meta,,1/info:regioninfo/1733480024933/Put/seqid=0 2024-12-06T10:14:45,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741851_1027 (size=5672) 2024-12-06T10:14:45,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741851_1027 (size=5672) 2024-12-06T10:14:45,870 INFO [M:0;552d6a33fa09:34949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc9d0d4865f240938e49e5b28af019c2 2024-12-06T10:14:45,891 DEBUG [M:0;552d6a33fa09:34949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5cab578c042c41e3bc2d3e75866ae2e8 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733480025868/Put/seqid=0 2024-12-06T10:14:45,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741852_1028 (size=8358) 2024-12-06T10:14:45,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741852_1028 (size=8358) 2024-12-06T10:14:45,896 INFO [M:0;552d6a33fa09:34949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.49 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5cab578c042c41e3bc2d3e75866ae2e8 2024-12-06T10:14:45,901 INFO [M:0;552d6a33fa09:34949 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5cab578c042c41e3bc2d3e75866ae2e8 2024-12-06T10:14:45,915 DEBUG [M:0;552d6a33fa09:34949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fb9ec76f9e4548bdb45e91c49843f924 is 69, key is 552d6a33fa09,42775,1733480024167/rs:state/1733480024402/Put/seqid=0 2024-12-06T10:14:45,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741853_1029 (size=5156) 2024-12-06T10:14:45,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741853_1029 (size=5156) 2024-12-06T10:14:45,922 INFO [M:0;552d6a33fa09:34949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=184 (bloomFilter=true), 
to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fb9ec76f9e4548bdb45e91c49843f924 2024-12-06T10:14:45,941 DEBUG [M:0;552d6a33fa09:34949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/88bdd2bdef7d414bb17ab6b1a4acfe9b is 52, key is load_balancer_on/state:d/1733480025494/Put/seqid=0 2024-12-06T10:14:45,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741854_1030 (size=5056) 2024-12-06T10:14:45,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741854_1030 (size=5056) 2024-12-06T10:14:45,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:14:45,946 INFO [RS:0;552d6a33fa09:42775 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,42775,1733480024167; zookeeper connection closed. 2024-12-06T10:14:45,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42775-0x10066d1c0e60001, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:14:45,946 INFO [M:0;552d6a33fa09:34949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/88bdd2bdef7d414bb17ab6b1a4acfe9b 2024-12-06T10:14:45,946 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52ba0516 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52ba0516 2024-12-06T10:14:45,947 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T10:14:45,951 DEBUG [M:0;552d6a33fa09:34949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc9d0d4865f240938e49e5b28af019c2 as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fc9d0d4865f240938e49e5b28af019c2 2024-12-06T10:14:45,957 INFO [M:0;552d6a33fa09:34949 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fc9d0d4865f240938e49e5b28af019c2, entries=8, sequenceid=184, filesize=5.5 K 2024-12-06T10:14:45,958 DEBUG [M:0;552d6a33fa09:34949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5cab578c042c41e3bc2d3e75866ae2e8 as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5cab578c042c41e3bc2d3e75866ae2e8 
2024-12-06T10:14:45,963 INFO [M:0;552d6a33fa09:34949 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5cab578c042c41e3bc2d3e75866ae2e8 2024-12-06T10:14:45,963 INFO [M:0;552d6a33fa09:34949 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5cab578c042c41e3bc2d3e75866ae2e8, entries=21, sequenceid=184, filesize=8.2 K 2024-12-06T10:14:45,964 DEBUG [M:0;552d6a33fa09:34949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fb9ec76f9e4548bdb45e91c49843f924 as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fb9ec76f9e4548bdb45e91c49843f924 2024-12-06T10:14:45,969 INFO [M:0;552d6a33fa09:34949 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fb9ec76f9e4548bdb45e91c49843f924, entries=1, sequenceid=184, filesize=5.0 K 2024-12-06T10:14:45,970 DEBUG [M:0;552d6a33fa09:34949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/88bdd2bdef7d414bb17ab6b1a4acfe9b as hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/88bdd2bdef7d414bb17ab6b1a4acfe9b 2024-12-06T10:14:45,975 INFO [M:0;552d6a33fa09:34949 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43379/user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/88bdd2bdef7d414bb17ab6b1a4acfe9b, entries=1, sequenceid=184, filesize=4.9 K 2024-12-06T10:14:45,976 INFO [M:0;552d6a33fa09:34949 {}] regionserver.HRegion(3040): Finished flush of dataSize ~65.10 KB/66658, heapSize ~81.66 KB/83616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=184, compaction requested=false 2024-12-06T10:14:45,979 INFO [M:0;552d6a33fa09:34949 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:14:45,979 DEBUG [M:0;552d6a33fa09:34949 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:14:45,979 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/1f364f91-9715-c814-b82e-9f09edaf15d7/MasterData/WALs/552d6a33fa09,34949,1733480024089 2024-12-06T10:14:45,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36003 is added to blk_1073741830_1006 (size=79179) 2024-12-06T10:14:45,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40655 is added to blk_1073741830_1006 (size=79179) 2024-12-06T10:14:45,981 INFO [M:0;552d6a33fa09:34949 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-06T10:14:45,981 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T10:14:45,982 INFO [M:0;552d6a33fa09:34949 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34949
2024-12-06T10:14:45,983 DEBUG [M:0;552d6a33fa09:34949 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/552d6a33fa09,34949,1733480024089 already deleted, retry=false
2024-12-06T10:14:46,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-06T10:14:46,085 INFO [M:0;552d6a33fa09:34949 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,34949,1733480024089; zookeeper connection closed.
2024-12-06T10:14:46,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34949-0x10066d1c0e60000, quorum=127.0.0.1:51385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-06T10:14:46,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a5f9711{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-06T10:14:46,088 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13fdc1a1{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-06T10:14:46,088 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-06T10:14:46,088 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4249af65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-06T10:14:46,088 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4448dbfe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/hadoop.log.dir/,STOPPED}
2024-12-06T10:14:46,089 WARN [BP-32812406-172.17.0.2-1733480023371 heartbeating to localhost/127.0.0.1:43379 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-06T10:14:46,089 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-06T10:14:46,089 WARN [BP-32812406-172.17.0.2-1733480023371 heartbeating to localhost/127.0.0.1:43379 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-32812406-172.17.0.2-1733480023371 (Datanode Uuid 45dd6cc8-6660-43a7-b15e-8b93934fe17b) service to localhost/127.0.0.1:43379 2024-12-06T10:14:46,089 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:14:46,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f/dfs/data/data3/current/BP-32812406-172.17.0.2-1733480023371 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:14:46,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f/dfs/data/data4/current/BP-32812406-172.17.0.2-1733480023371 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:14:46,090 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:14:46,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6fb68f59{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:14:46,092 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a1e42bd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:14:46,092 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:14:46,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47065cf1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:14:46,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@105e54da{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/hadoop.log.dir/,STOPPED} 2024-12-06T10:14:46,094 WARN [BP-32812406-172.17.0.2-1733480023371 heartbeating to localhost/127.0.0.1:43379 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:14:46,094 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:14:46,094 WARN [BP-32812406-172.17.0.2-1733480023371 heartbeating to localhost/127.0.0.1:43379 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-32812406-172.17.0.2-1733480023371 (Datanode Uuid d7d71029-613d-4956-b3af-08f8d4c8d19f) service to localhost/127.0.0.1:43379 2024-12-06T10:14:46,094 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:14:46,094 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f/dfs/data/data1/current/BP-32812406-172.17.0.2-1733480023371 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:14:46,095 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/cluster_2c428629-78ca-38f4-f743-64d77fccb27f/dfs/data/data2/current/BP-32812406-172.17.0.2-1733480023371 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:14:46,095 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:14:46,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@20b89acb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:14:46,102 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f9c06d4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:14:46,102 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:14:46,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d73ede7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:14:46,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15bb8b05{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/hadoop.log.dir/,STOPPED} 2024-12-06T10:14:46,108 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T10:14:46,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-06T10:14:46,135 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=110 (was 100) - Thread LEAK? -, OpenFileDescriptor=464 (was 444) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=126 (was 153), ProcessCount=11 (was 11), AvailableMemoryMB=6523 (was 6674) 2024-12-06T10:14:46,142 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=111, OpenFileDescriptor=464, MaxFileDescriptor=1048576, SystemLoadAverage=126, ProcessCount=11, AvailableMemoryMB=6523 2024-12-06T10:14:46,142 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T10:14:46,142 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/hadoop.log.dir so I do NOT create it in target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f 2024-12-06T10:14:46,142 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4645015d-4d6c-b5af-fc2e-9b70fc09cb53/hadoop.tmp.dir so I do NOT create it in target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f 2024-12-06T10:14:46,143 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93, deleteOnExit=true 2024-12-06T10:14:46,143 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T10:14:46,143 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/test.cache.data in system properties and HBase conf 2024-12-06T10:14:46,143 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T10:14:46,143 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/hadoop.log.dir in system properties and HBase conf 2024-12-06T10:14:46,143 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T10:14:46,143 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T10:14:46,143 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T10:14:46,143 DEBUG [Time-limited test {}] fs.HFileSystem(310): The 
file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-06T10:14:46,143 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/nfs.dump.dir in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/java.io.tmpdir in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T10:14:46,144 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T10:14:46,158 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:14:46,221 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:14:46,225 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:14:46,226 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:14:46,226 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:14:46,226 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:14:46,226 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:14:46,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a33d33a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:14:46,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1849e93e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:14:46,340 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@491e4eb5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/java.io.tmpdir/jetty-localhost-42853-hadoop-hdfs-3_4_1-tests_jar-_-any-59654355500741961/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:14:46,341 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e57ffb4{HTTP/1.1, (http/1.1)}{localhost:42853} 2024-12-06T10:14:46,341 INFO [Time-limited test {}] server.Server(415): Started @287633ms 2024-12-06T10:14:46,354 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:14:46,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:46,405 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:14:46,408 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:14:46,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:14:46,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:14:46,409 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:14:46,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44bd9a2a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:14:46,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4aeec8df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:14:46,425 INFO [regionserver/552d6a33fa09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:14:46,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60d32c96{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/java.io.tmpdir/jetty-localhost-40763-hadoop-hdfs-3_4_1-tests_jar-_-any-1651851247622188171/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:14:46,528 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@407f2320{HTTP/1.1, (http/1.1)}{localhost:40763} 2024-12-06T10:14:46,528 INFO [Time-limited test {}] server.Server(415): Started @287820ms 2024-12-06T10:14:46,529 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:14:46,561 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:14:46,563 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:14:46,564 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:14:46,564 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:14:46,564 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:14:46,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15bff48f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:14:46,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52ad931f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:14:46,628 WARN [Thread-1688 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93/dfs/data/data1/current/BP-1503095463-172.17.0.2-1733480086169/current, will proceed with Du for space computation calculation, 2024-12-06T10:14:46,628 WARN [Thread-1689 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93/dfs/data/data2/current/BP-1503095463-172.17.0.2-1733480086169/current, will proceed with Du for space computation calculation, 2024-12-06T10:14:46,661 WARN [Thread-1667 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T10:14:46,664 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6b284726bf7168ea with lease ID 0x4e078295623f6e51: Processing first storage report for DS-12ddf914-d405-4152-b509-06917da15670 from datanode DatanodeRegistration(127.0.0.1:43767, datanodeUuid=c7cc0e52-ac7a-4413-8cf7-4bd2f7990889, infoPort=37659, infoSecurePort=0, ipcPort=32943, storageInfo=lv=-57;cid=testClusterID;nsid=1566248880;c=1733480086169) 2024-12-06T10:14:46,665 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b284726bf7168ea with lease ID 0x4e078295623f6e51: from storage DS-12ddf914-d405-4152-b509-06917da15670 node DatanodeRegistration(127.0.0.1:43767, datanodeUuid=c7cc0e52-ac7a-4413-8cf7-4bd2f7990889, infoPort=37659, infoSecurePort=0, ipcPort=32943, storageInfo=lv=-57;cid=testClusterID;nsid=1566248880;c=1733480086169), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:14:46,665 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6b284726bf7168ea with lease ID 0x4e078295623f6e51: Processing first storage report for DS-fcfac2bd-3a24-480d-a166-f3feca1d0b71 from datanode DatanodeRegistration(127.0.0.1:43767, datanodeUuid=c7cc0e52-ac7a-4413-8cf7-4bd2f7990889, infoPort=37659, infoSecurePort=0, ipcPort=32943, storageInfo=lv=-57;cid=testClusterID;nsid=1566248880;c=1733480086169) 2024-12-06T10:14:46,665 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b284726bf7168ea with lease ID 0x4e078295623f6e51: from storage DS-fcfac2bd-3a24-480d-a166-f3feca1d0b71 node DatanodeRegistration(127.0.0.1:43767, datanodeUuid=c7cc0e52-ac7a-4413-8cf7-4bd2f7990889, infoPort=37659, infoSecurePort=0, ipcPort=32943, storageInfo=lv=-57;cid=testClusterID;nsid=1566248880;c=1733480086169), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T10:14:46,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59d12a70{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/java.io.tmpdir/jetty-localhost-34295-hadoop-hdfs-3_4_1-tests_jar-_-any-17275204721552084141/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:14:46,680 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@57c0ad86{HTTP/1.1, (http/1.1)}{localhost:34295} 2024-12-06T10:14:46,680 INFO [Time-limited test {}] server.Server(415): Started @287972ms 2024-12-06T10:14:46,682 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-06T10:14:46,769 WARN [Thread-1714 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93/dfs/data/data3/current/BP-1503095463-172.17.0.2-1733480086169/current, will proceed with Du for space computation calculation, 2024-12-06T10:14:46,769 WARN [Thread-1715 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93/dfs/data/data4/current/BP-1503095463-172.17.0.2-1733480086169/current, will proceed with Du for space computation calculation, 2024-12-06T10:14:46,792 WARN [Thread-1703 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T10:14:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x28b0924c322ce556 with lease ID 0x4e078295623f6e52: Processing first storage report for DS-854c8628-3123-457c-a43d-e76452ab3bb7 from datanode DatanodeRegistration(127.0.0.1:36519, datanodeUuid=09e2e145-e660-4237-b5eb-a2ef5392eb25, infoPort=32825, infoSecurePort=0, ipcPort=40591, storageInfo=lv=-57;cid=testClusterID;nsid=1566248880;c=1733480086169) 2024-12-06T10:14:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x28b0924c322ce556 with lease ID 0x4e078295623f6e52: from storage DS-854c8628-3123-457c-a43d-e76452ab3bb7 node DatanodeRegistration(127.0.0.1:36519, datanodeUuid=09e2e145-e660-4237-b5eb-a2ef5392eb25, infoPort=32825, infoSecurePort=0, ipcPort=40591, storageInfo=lv=-57;cid=testClusterID;nsid=1566248880;c=1733480086169), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:14:46,795 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x28b0924c322ce556 with lease ID 0x4e078295623f6e52: Processing first storage report for DS-0463e459-eec3-4db6-9778-e598229bbef2 from datanode DatanodeRegistration(127.0.0.1:36519, datanodeUuid=09e2e145-e660-4237-b5eb-a2ef5392eb25, infoPort=32825, infoSecurePort=0, ipcPort=40591, storageInfo=lv=-57;cid=testClusterID;nsid=1566248880;c=1733480086169) 2024-12-06T10:14:46,795 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x28b0924c322ce556 with lease ID 0x4e078295623f6e52: from storage DS-0463e459-eec3-4db6-9778-e598229bbef2 node DatanodeRegistration(127.0.0.1:36519, datanodeUuid=09e2e145-e660-4237-b5eb-a2ef5392eb25, infoPort=32825, infoSecurePort=0, ipcPort=40591, storageInfo=lv=-57;cid=testClusterID;nsid=1566248880;c=1733480086169), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:14:46,806 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f 2024-12-06T10:14:46,808 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93/zookeeper_0, clientPort=60928, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T10:14:46,809 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=60928 2024-12-06T10:14:46,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:14:46,811 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:14:46,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:14:46,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:14:46,821 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9 with version=8 2024-12-06T10:14:46,821 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/hbase-staging 2024-12-06T10:14:46,823 INFO [Time-limited test {}] client.ConnectionUtils(129): master/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:14:46,823 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:14:46,824 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:14:46,824 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:14:46,824 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:14:46,824 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:14:46,824 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:14:46,824 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:14:46,824 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34115 2024-12-06T10:14:46,825 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:14:46,826 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:14:46,828 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:34115 connecting to ZooKeeper ensemble=127.0.0.1:60928 2024-12-06T10:14:46,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:341150x0, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:14:46,833 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34115-0x10066d2b5fe0000 connected 2024-12-06T10:14:46,845 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:14:46,845 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:14:46,846 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:14:46,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34115 2024-12-06T10:14:46,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34115 2024-12-06T10:14:46,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34115 2024-12-06T10:14:46,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34115 2024-12-06T10:14:46,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34115 2024-12-06T10:14:46,849 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9, hbase.cluster.distributed=false 2024-12-06T10:14:46,873 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:14:46,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:14:46,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:14:46,873 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:14:46,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:14:46,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:14:46,873 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:14:46,874 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:14:46,874 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33905 2024-12-06T10:14:46,875 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T10:14:46,875 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T10:14:46,876 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:14:46,878 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:14:46,881 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33905 connecting to ZooKeeper ensemble=127.0.0.1:60928 2024-12-06T10:14:46,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339050x0, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:14:46,884 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33905-0x10066d2b5fe0001 connected 2024-12-06T10:14:46,884 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:14:46,884 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:14:46,885 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:14:46,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33905 2024-12-06T10:14:46,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33905 2024-12-06T10:14:46,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33905 2024-12-06T10:14:46,886 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 
with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33905 2024-12-06T10:14:46,886 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33905 2024-12-06T10:14:46,887 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/552d6a33fa09,34115,1733480086823 2024-12-06T10:14:46,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:14:46,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:14:46,888 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/552d6a33fa09,34115,1733480086823 2024-12-06T10:14:46,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:14:46,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:14:46,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:46,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:46,890 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:14:46,890 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/552d6a33fa09,34115,1733480086823 from backup master directory 2024-12-06T10:14:46,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/552d6a33fa09,34115,1733480086823 2024-12-06T10:14:46,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:14:46,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:14:46,892 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] 
hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T10:14:46,892 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=552d6a33fa09,34115,1733480086823 2024-12-06T10:14:46,892 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:14:46,902 DEBUG [M:0;552d6a33fa09:34115 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;552d6a33fa09:34115 2024-12-06T10:14:46,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:14:46,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:14:46,905 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/hbase.id with ID: 9d1a257b-c2ee-47ba-97fd-309c20863c60 2024-12-06T10:14:46,914 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:14:46,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:46,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:46,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:14:46,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:14:46,924 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B 
(64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:14:46,924 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T10:14:46,924 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:14:46,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:14:46,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:14:46,932 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store 2024-12-06T10:14:46,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:14:46,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:14:46,938 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:14:46,938 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1681): 
Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:14:46,938 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:14:46,938 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:14:46,938 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:14:46,938 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:14:46,938 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:14:46,938 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:14:46,939 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/.initializing 2024-12-06T10:14:46,939 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/WALs/552d6a33fa09,34115,1733480086823 2024-12-06T10:14:46,941 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C34115%2C1733480086823, suffix=, logDir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/WALs/552d6a33fa09,34115,1733480086823, archiveDir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/oldWALs, maxLogs=10 2024-12-06T10:14:46,942 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C34115%2C1733480086823.1733480086942 2024-12-06T10:14:46,948 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/WALs/552d6a33fa09,34115,1733480086823/552d6a33fa09%2C34115%2C1733480086823.1733480086942 2024-12-06T10:14:46,948 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32825:32825),(127.0.0.1/127.0.0.1:37659:37659)] 2024-12-06T10:14:46,948 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:14:46,948 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:14:46,948 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:14:46,948 DEBUG 
[master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:14:46,950 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:14:46,951 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T10:14:46,951 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:46,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:14:46,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:14:46,953 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T10:14:46,953 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:46,953 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
2024-12-06T10:14:46,953 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:14:46,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T10:14:46,954 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:46,955 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:14:46,955 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:14:46,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T10:14:46,956 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:46,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:14:46,957 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:14:46,957 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:14:46,959 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T10:14:46,960 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:14:46,962 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:14:46,962 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794786, jitterRate=0.010623425245285034}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T10:14:46,962 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:14:46,963 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T10:14:46,966 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7355440a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:14:46,966 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T10:14:46,966 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T10:14:46,966 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T10:14:46,967 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
2024-12-06T10:14:46,967 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T10:14:46,967 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-06T10:14:46,967 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T10:14:46,969 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T10:14:46,970 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T10:14:46,971 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T10:14:46,971 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T10:14:46,971 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T10:14:46,972 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T10:14:46,973 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T10:14:46,973 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T10:14:46,974 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T10:14:46,975 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T10:14:46,976 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T10:14:46,977 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T10:14:46,978 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T10:14:46,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-06T10:14:46,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T10:14:46,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:46,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:46,980 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=552d6a33fa09,34115,1733480086823, sessionid=0x10066d2b5fe0000, setting cluster-up flag (Was=false) 2024-12-06T10:14:46,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:46,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:46,986 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T10:14:46,986 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,34115,1733480086823 2024-12-06T10:14:46,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:46,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:46,992 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T10:14:46,993 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,34115,1733480086823 2024-12-06T10:14:46,995 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T10:14:46,995 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T10:14:46,996 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, 
RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T10:14:46,996 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 552d6a33fa09,34115,1733480086823 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T10:14:46,996 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:14:46,996 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:14:46,996 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:14:46,996 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:14:46,996 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/552d6a33fa09:0, corePoolSize=10, maxPoolSize=10 2024-12-06T10:14:46,996 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:46,996 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:14:46,996 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:46,997 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733480116997 2024-12-06T10:14:46,997 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T10:14:46,997 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T10:14:46,997 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T10:14:46,997 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:14:46,997 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T10:14:46,997 INFO [PEWorker-1 {}] 
procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T10:14:46,997 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T10:14:46,997 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T10:14:46,998 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:46,998 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T10:14:46,998 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T10:14:46,998 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T10:14:46,998 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:46,998 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T10:14:46,998 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T10:14:46,998 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:14:46,999 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480086999,5,FailOnTimeoutGroup] 2024-12-06T10:14:46,999 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480086999,5,FailOnTimeoutGroup] 2024-12-06T10:14:46,999 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:46,999 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T10:14:46,999 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:46,999 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:14:47,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:14:47,010 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T10:14:47,010 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9 2024-12-06T10:14:47,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:14:47,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:14:47,016 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:14:47,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:14:47,019 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:14:47,019 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:47,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:14:47,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:14:47,021 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:14:47,021 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:47,021 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:14:47,021 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:14:47,022 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle 
point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:14:47,022 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:47,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:14:47,023 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740 2024-12-06T10:14:47,024 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740 2024-12-06T10:14:47,025 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:14:47,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:14:47,028 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:14:47,028 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=865724, jitterRate=0.10082553327083588}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:14:47,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:14:47,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:14:47,028 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:14:47,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:14:47,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:14:47,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:14:47,029 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T10:14:47,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:14:47,030 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:14:47,030 INFO [PEWorker-1 {}] 
procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T10:14:47,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T10:14:47,031 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T10:14:47,032 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T10:14:47,099 DEBUG [RS:0;552d6a33fa09:33905 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;552d6a33fa09:33905 2024-12-06T10:14:47,099 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1008): ClusterId : 9d1a257b-c2ee-47ba-97fd-309c20863c60 2024-12-06T10:14:47,100 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T10:14:47,102 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T10:14:47,102 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T10:14:47,103 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T10:14:47,104 DEBUG [RS:0;552d6a33fa09:33905 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@582f8dc1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:14:47,104 DEBUG [RS:0;552d6a33fa09:33905 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cab86d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:14:47,104 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T10:14:47,104 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T10:14:47,104 DEBUG [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-06T10:14:47,105 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(3073): reportForDuty to master=552d6a33fa09,34115,1733480086823 with isa=552d6a33fa09/172.17.0.2:33905, startcode=1733480086872 2024-12-06T10:14:47,105 DEBUG [RS:0;552d6a33fa09:33905 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:14:47,107 INFO [RS-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48437, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:14:47,107 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34115 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,107 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34115 {}] master.ServerManager(486): Registering regionserver=552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,109 DEBUG [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9 2024-12-06T10:14:47,109 DEBUG [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:45939 2024-12-06T10:14:47,109 DEBUG [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T10:14:47,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:14:47,111 DEBUG [RS:0;552d6a33fa09:33905 {}] zookeeper.ZKUtil(111): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,111 WARN [RS:0;552d6a33fa09:33905 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T10:14:47,111 INFO [RS:0;552d6a33fa09:33905 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:14:47,111 DEBUG [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,111 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [552d6a33fa09,33905,1733480086872] 2024-12-06T10:14:47,114 DEBUG [RS:0;552d6a33fa09:33905 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T10:14:47,114 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T10:14:47,116 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T10:14:47,116 INFO [RS:0;552d6a33fa09:33905 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T10:14:47,116 INFO [RS:0;552d6a33fa09:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,118 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T10:14:47,119 INFO [RS:0;552d6a33fa09:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T10:14:47,119 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:47,119 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:47,119 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:47,119 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:47,119 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:47,119 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:14:47,119 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:47,119 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:47,120 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:47,120 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:47,120 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:14:47,120 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:14:47,120 DEBUG [RS:0;552d6a33fa09:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:14:47,120 INFO [RS:0;552d6a33fa09:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,120 INFO [RS:0;552d6a33fa09:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,120 INFO [RS:0;552d6a33fa09:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,120 INFO [RS:0;552d6a33fa09:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,120 INFO [RS:0;552d6a33fa09:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,33905,1733480086872-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T10:14:47,135 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T10:14:47,135 INFO [RS:0;552d6a33fa09:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,33905,1733480086872-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,149 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.Replication(204): 552d6a33fa09,33905,1733480086872 started 2024-12-06T10:14:47,149 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1767): Serving as 552d6a33fa09,33905,1733480086872, RpcServer on 552d6a33fa09/172.17.0.2:33905, sessionid=0x10066d2b5fe0001 2024-12-06T10:14:47,150 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T10:14:47,150 DEBUG [RS:0;552d6a33fa09:33905 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,150 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,33905,1733480086872' 2024-12-06T10:14:47,150 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T10:14:47,150 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T10:14:47,150 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T10:14:47,151 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T10:14:47,151 DEBUG [RS:0;552d6a33fa09:33905 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,151 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,33905,1733480086872' 2024-12-06T10:14:47,151 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T10:14:47,151 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T10:14:47,151 DEBUG [RS:0;552d6a33fa09:33905 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T10:14:47,151 INFO [RS:0;552d6a33fa09:33905 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T10:14:47,151 INFO [RS:0;552d6a33fa09:33905 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T10:14:47,182 WARN [552d6a33fa09:34115 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-06T10:14:47,253 INFO [RS:0;552d6a33fa09:33905 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C33905%2C1733480086872, suffix=, logDir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872, archiveDir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/oldWALs, maxLogs=32
2024-12-06T10:14:47,254 INFO [RS:0;552d6a33fa09:33905 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C33905%2C1733480086872.1733480087254
2024-12-06T10:14:47,262 INFO [RS:0;552d6a33fa09:33905 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872/552d6a33fa09%2C33905%2C1733480086872.1733480087254
2024-12-06T10:14:47,262 DEBUG [RS:0;552d6a33fa09:33905 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32825:32825),(127.0.0.1/127.0.0.1:37659:37659)]
2024-12-06T10:14:47,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-12-06T10:14:47,432 DEBUG [552d6a33fa09:34115 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T10:14:47,433 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,434 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,33905,1733480086872, state=OPENING 2024-12-06T10:14:47,435 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T10:14:47,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:47,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:47,437 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=552d6a33fa09,33905,1733480086872}] 2024-12-06T10:14:47,437 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:14:47,437 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:14:47,590 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,590 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T10:14:47,592 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36182, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T10:14:47,596 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T10:14:47,596 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:14:47,598 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C33905%2C1733480086872.meta, suffix=.meta, logDir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872, archiveDir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/oldWALs, maxLogs=32 2024-12-06T10:14:47,598 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C33905%2C1733480086872.meta.1733480087598.meta 2024-12-06T10:14:47,603 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872/552d6a33fa09%2C33905%2C1733480086872.meta.1733480087598.meta 2024-12-06T10:14:47,603 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37659:37659),(127.0.0.1/127.0.0.1:32825:32825)] 2024-12-06T10:14:47,603 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:14:47,603 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T10:14:47,604 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T10:14:47,604 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T10:14:47,604 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T10:14:47,604 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:14:47,604 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T10:14:47,604 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T10:14:47,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:14:47,606 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:14:47,606 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T10:14:47,606 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:14:47,606 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:14:47,607 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:14:47,607 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:47,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:14:47,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:14:47,608 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:14:47,609 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:47,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:14:47,609 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) 
under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740 2024-12-06T10:14:47,610 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740 2024-12-06T10:14:47,612 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T10:14:47,613 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:14:47,613 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764550, jitterRate=-0.027825266122817993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:14:47,614 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:14:47,614 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733480087590 2024-12-06T10:14:47,616 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T10:14:47,616 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T10:14:47,617 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,617 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,33905,1733480086872, state=OPEN 2024-12-06T10:14:47,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:14:47,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:14:47,621 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:14:47,621 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:14:47,623 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T10:14:47,623 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, 
state=SUCCESS; OpenRegionProcedure 1588230740, server=552d6a33fa09,33905,1733480086872 in 184 msec 2024-12-06T10:14:47,625 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T10:14:47,625 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 593 msec 2024-12-06T10:14:47,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 631 msec 2024-12-06T10:14:47,626 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733480087626, completionTime=-1 2024-12-06T10:14:47,627 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T10:14:47,627 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T10:14:47,627 DEBUG [hconnection-0x19ce33f-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:14:47,629 INFO [RS-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:14:47,629 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T10:14:47,629 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733480147629 2024-12-06T10:14:47,629 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733480207629 2024-12-06T10:14:47,630 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec 2024-12-06T10:14:47,634 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34115,1733480086823-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,634 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34115,1733480086823-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,634 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34115,1733480086823-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,634 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-552d6a33fa09:34115, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,635 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T10:14:47,635 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. 
Creating... 2024-12-06T10:14:47,635 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:14:47,636 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T10:14:47,636 DEBUG [master/552d6a33fa09:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T10:14:47,637 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:14:47,637 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:47,637 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:14:47,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:14:47,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:14:47,646 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 134944dfd362da855537f8648b2c3035, NAME => 'hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9 2024-12-06T10:14:47,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:14:47,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:14:47,653 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:14:47,653 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 134944dfd362da855537f8648b2c3035, disabling compactions & flushes 2024-12-06T10:14:47,653 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 2024-12-06T10:14:47,653 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 2024-12-06T10:14:47,653 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. after waiting 0 ms 2024-12-06T10:14:47,653 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 2024-12-06T10:14:47,653 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 2024-12-06T10:14:47,653 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 134944dfd362da855537f8648b2c3035: 2024-12-06T10:14:47,654 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:14:47,654 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733480087654"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480087654"}]},"ts":"1733480087654"} 2024-12-06T10:14:47,656 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T10:14:47,657 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:14:47,657 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480087657"}]},"ts":"1733480087657"} 2024-12-06T10:14:47,658 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T10:14:47,661 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=134944dfd362da855537f8648b2c3035, ASSIGN}] 2024-12-06T10:14:47,662 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=134944dfd362da855537f8648b2c3035, ASSIGN 2024-12-06T10:14:47,663 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=134944dfd362da855537f8648b2c3035, ASSIGN; state=OFFLINE, location=552d6a33fa09,33905,1733480086872; forceNewPlan=false, retain=false 2024-12-06T10:14:47,813 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=134944dfd362da855537f8648b2c3035, regionState=OPENING, regionLocation=552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,815 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 134944dfd362da855537f8648b2c3035, server=552d6a33fa09,33905,1733480086872}] 2024-12-06T10:14:47,968 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,972 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 2024-12-06T10:14:47,972 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 134944dfd362da855537f8648b2c3035, NAME => 'hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:14:47,972 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 134944dfd362da855537f8648b2c3035 2024-12-06T10:14:47,972 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:14:47,972 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 134944dfd362da855537f8648b2c3035 2024-12-06T10:14:47,972 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 134944dfd362da855537f8648b2c3035 2024-12-06T10:14:47,974 INFO [StoreOpener-134944dfd362da855537f8648b2c3035-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 134944dfd362da855537f8648b2c3035 2024-12-06T10:14:47,975 INFO [StoreOpener-134944dfd362da855537f8648b2c3035-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 134944dfd362da855537f8648b2c3035 columnFamilyName info 2024-12-06T10:14:47,975 DEBUG [StoreOpener-134944dfd362da855537f8648b2c3035-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:47,976 INFO [StoreOpener-134944dfd362da855537f8648b2c3035-1 {}] regionserver.HStore(327): Store=134944dfd362da855537f8648b2c3035/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:14:47,977 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/namespace/134944dfd362da855537f8648b2c3035 2024-12-06T10:14:47,977 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/namespace/134944dfd362da855537f8648b2c3035 2024-12-06T10:14:47,979 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 134944dfd362da855537f8648b2c3035 2024-12-06T10:14:47,981 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/namespace/134944dfd362da855537f8648b2c3035/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:14:47,981 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 134944dfd362da855537f8648b2c3035; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774726, jitterRate=-0.014885365962982178}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:14:47,981 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 134944dfd362da855537f8648b2c3035: 2024-12-06T10:14:47,982 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035., pid=6, masterSystemTime=1733480087967 2024-12-06T10:14:47,984 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 2024-12-06T10:14:47,984 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 
2024-12-06T10:14:47,984 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=134944dfd362da855537f8648b2c3035, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,33905,1733480086872 2024-12-06T10:14:47,988 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T10:14:47,988 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 134944dfd362da855537f8648b2c3035, server=552d6a33fa09,33905,1733480086872 in 171 msec 2024-12-06T10:14:47,989 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T10:14:47,989 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=134944dfd362da855537f8648b2c3035, ASSIGN in 327 msec 2024-12-06T10:14:47,990 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:14:47,990 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480087990"}]},"ts":"1733480087990"} 2024-12-06T10:14:47,992 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T10:14:47,994 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:14:47,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 359 msec 2024-12-06T10:14:48,037 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T10:14:48,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:14:48,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:48,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:14:48,043 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T10:14:48,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:14:48,053 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 10 msec 2024-12-06T10:14:48,055 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T10:14:48,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:14:48,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 8 msec 2024-12-06T10:14:48,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T10:14:48,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T10:14:48,071 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.179sec 2024-12-06T10:14:48,071 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T10:14:48,071 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T10:14:48,071 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T10:14:48,071 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T10:14:48,071 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T10:14:48,071 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34115,1733480086823-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T10:14:48,071 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34115,1733480086823-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T10:14:48,073 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T10:14:48,073 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T10:14:48,073 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,34115,1733480086823-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T10:14:48,089 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31a733e5 to 127.0.0.1:60928 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12e1f3d6 2024-12-06T10:14:48,092 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3f5f36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:14:48,094 DEBUG [hconnection-0x6a904f20-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:14:48,096 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:14:48,097 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=552d6a33fa09,34115,1733480086823 2024-12-06T10:14:48,098 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:14:48,100 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T10:14:48,100 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T10:14:48,103 INFO [RS-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35512, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T10:14:48,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34115 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T10:14:48,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34115 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-06T10:14:48,104 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34115 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:14:48,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34115 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-06T10:14:48,106 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:14:48,106 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:48,107 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34115 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 9 2024-12-06T10:14:48,107 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:14:48,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34115 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T10:14:48,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741837_1013 (size=381) 2024-12-06T10:14:48,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741837_1013 (size=381) 2024-12-06T10:14:48,116 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9f98efa95d36f7a34c151ce0056e8990, NAME => 'TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9 2024-12-06T10:14:48,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741838_1014 (size=64) 2024-12-06T10:14:48,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741838_1014 (size=64) 2024-12-06T10:14:48,125 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(894): Instantiated 
TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:14:48,125 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 9f98efa95d36f7a34c151ce0056e8990, disabling compactions & flushes 2024-12-06T10:14:48,126 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 2024-12-06T10:14:48,126 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 2024-12-06T10:14:48,126 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. after waiting 0 ms 2024-12-06T10:14:48,126 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 2024-12-06T10:14:48,126 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 2024-12-06T10:14:48,126 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9f98efa95d36f7a34c151ce0056e8990: 2024-12-06T10:14:48,127 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:14:48,127 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733480088127"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480088127"}]},"ts":"1733480088127"} 2024-12-06T10:14:48,128 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-06T10:14:48,129 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:14:48,129 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480088129"}]},"ts":"1733480088129"} 2024-12-06T10:14:48,131 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-06T10:14:48,134 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f98efa95d36f7a34c151ce0056e8990, ASSIGN}] 2024-12-06T10:14:48,135 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f98efa95d36f7a34c151ce0056e8990, ASSIGN 2024-12-06T10:14:48,135 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f98efa95d36f7a34c151ce0056e8990, ASSIGN; state=OFFLINE, location=552d6a33fa09,33905,1733480086872; forceNewPlan=false, retain=false 2024-12-06T10:14:48,286 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9f98efa95d36f7a34c151ce0056e8990, regionState=OPENING, regionLocation=552d6a33fa09,33905,1733480086872 2024-12-06T10:14:48,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 9f98efa95d36f7a34c151ce0056e8990, server=552d6a33fa09,33905,1733480086872}] 2024-12-06T10:14:48,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:48,440 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33905,1733480086872 2024-12-06T10:14:48,444 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 2024-12-06T10:14:48,444 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 9f98efa95d36f7a34c151ce0056e8990, NAME => 'TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:14:48,444 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:14:48,444 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:14:48,444 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:14:48,444 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:14:48,445 INFO [StoreOpener-9f98efa95d36f7a34c151ce0056e8990-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:14:48,447 INFO [StoreOpener-9f98efa95d36f7a34c151ce0056e8990-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9f98efa95d36f7a34c151ce0056e8990 columnFamilyName info 2024-12-06T10:14:48,447 DEBUG [StoreOpener-9f98efa95d36f7a34c151ce0056e8990-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:14:48,447 INFO [StoreOpener-9f98efa95d36f7a34c151ce0056e8990-1 {}] regionserver.HStore(327): Store=9f98efa95d36f7a34c151ce0056e8990/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:14:48,448 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:14:48,448 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:14:48,450 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:14:48,452 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:14:48,452 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 9f98efa95d36f7a34c151ce0056e8990; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854515, jitterRate=0.0865725725889206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:14:48,453 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 9f98efa95d36f7a34c151ce0056e8990: 2024-12-06T10:14:48,454 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990., pid=11, masterSystemTime=1733480088440 2024-12-06T10:14:48,455 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 2024-12-06T10:14:48,455 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 
2024-12-06T10:14:48,456 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9f98efa95d36f7a34c151ce0056e8990, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,33905,1733480086872 2024-12-06T10:14:48,459 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T10:14:48,459 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 9f98efa95d36f7a34c151ce0056e8990, server=552d6a33fa09,33905,1733480086872 in 169 msec 2024-12-06T10:14:48,461 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T10:14:48,461 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f98efa95d36f7a34c151ce0056e8990, ASSIGN in 326 msec 2024-12-06T10:14:48,462 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:14:48,462 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480088462"}]},"ts":"1733480088462"} 2024-12-06T10:14:48,463 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-06T10:14:48,465 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:14:48,467 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRolling in 361 msec 2024-12-06T10:14:49,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:50,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:50,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:50,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,201 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T10:14:51,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:14:51,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:14:52,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-06T10:14:53,114 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-06T10:14:53,115 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-06T10:14:53,115 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
2024-12-06T10:14:53,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-06T10:14:55,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-12-06T10:14:55,196 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-06T10:14:55,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-06T10:14:58,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34115 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-06T10:14:58,109 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling, procId: 9 completed
2024-12-06T10:14:58,112 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRolling
2024-12-06T10:14:58,112 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.
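The Close-WAL-Writer-0 warning above comes from the WAL close path probing, via reflection, whether HDFS has finished closing the rolled meta WAL; the probe is retried roughly once per second, and because the test's HDFS client has already been shut down, every attempt fails with java.io.IOException: Filesystem closed. A minimal sketch of such a probe loop, written against the public DistributedFileSystem API rather than the reflective call used by RecoverLeaseFSUtils, and with hypothetical class and path names, could look like this:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseProbeSketch {
  /**
   * Poll HDFS about once per second until the file's lease is released.
   * Returns false if the probe keeps failing, e.g. because the underlying
   * DFSClient has already been closed ("Filesystem closed").
   */
  static boolean waitForFileClosed(DistributedFileSystem dfs, Path wal, int maxAttempts)
      throws InterruptedException {
    for (int i = 0; i < maxAttempts; i++) {
      try {
        if (dfs.isFileClosed(wal)) {   // same check RecoverLeaseFSUtils reaches via reflection
          return true;
        }
        dfs.recoverLease(wal);         // ask the NameNode to reclaim the lease
      } catch (IOException e) {
        // With a closed client this throws "Filesystem closed" on every attempt,
        // which is the repeated WARN seen in the log above.
      }
      Thread.sleep(1000L);
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical path; in the test it is the rolled meta WAL under the test-data directory.
    Path wal = new Path("hdfs://localhost:45977/example/old.meta");
    try (FileSystem fs = FileSystem.get(wal.toUri(), conf)) {
      if (fs instanceof DistributedFileSystem) {
        waitForFileClosed((DistributedFileSystem) fs, wal, 10);
      }
    }
  }
}
```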
2024-12-06T10:14:58,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 9f98efa95d36f7a34c151ce0056e8990
2024-12-06T10:14:58,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f98efa95d36f7a34c151ce0056e8990 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-06T10:14:58,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/42f9915ee8874c7b9daaeb312b14ea15 is 1080, key is row0001/info:/1733480098115/Put/seqid=0
2024-12-06T10:14:58,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741839_1015 (size=12509)
2024-12-06T10:14:58,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741839_1015 (size=12509)
2024-12-06T10:14:58,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/42f9915ee8874c7b9daaeb312b14ea15
2024-12-06T10:14:58,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9f98efa95d36f7a34c151ce0056e8990, server=552d6a33fa09,33905,1733480086872
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T10:14:58,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:36200 deadline: 1733480108148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9f98efa95d36f7a34c151ce0056e8990, server=552d6a33fa09,33905,1733480086872
2024-12-06T10:14:58,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/42f9915ee8874c7b9daaeb312b14ea15 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/42f9915ee8874c7b9daaeb312b14ea15
2024-12-06T10:14:58,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/42f9915ee8874c7b9daaeb312b14ea15, entries=7, sequenceid=11, filesize=12.2 K
2024-12-06T10:14:58,159 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 9f98efa95d36f7a34c151ce0056e8990 in 36ms, sequenceid=11, compaction requested=false
2024-12-06T10:14:58,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f98efa95d36f7a34c151ce0056e8990:
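The RegionTooBusyException above means the region's memstore has grown past its blocking limit (32.0 K in this test), so the mutation is rejected until the in-flight flush completes. The HBase client normally retries this on its own, and the exception may arrive wrapped rather than thrown directly; the loop below is only an illustrative sketch of explicit retry-with-backoff around a single Put, with hypothetical table, row, and column names:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  // Retry a single put with exponential backoff while the region reports it is too busy.
  static void putWithBackoff(Table table, Put put, int maxRetries)
      throws IOException, InterruptedException {
    long backoffMs = 100;
    for (int attempt = 0; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxRetries) {
          throw e;                      // give up after maxRetries attempts
        }
        Thread.sleep(backoffMs);        // let the memstore flush catch up
        backoffMs = Math.min(backoffMs * 2, 5_000);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      // Hypothetical row and column; the test writes rows like row0001 into the 'info' family.
      Put put = new Put(Bytes.toBytes("row0001"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5);
    }
  }
}
```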
2024-12-06T10:15:00,698 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-06T10:15:00,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-06T10:15:00,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-06T10:15:05,535 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=1, created chunk count=15, reused chunk count=36, reuseRatio=70.59%
2024-12-06T10:15:05,535 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-12-06T10:15:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 9f98efa95d36f7a34c151ce0056e8990
2024-12-06T10:15:08,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f98efa95d36f7a34c151ce0056e8990 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-06T10:15:08,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/98bd992610ff4baa91dbea166026e4b6 is 1080, key is row0008/info:/1733480098123/Put/seqid=0
2024-12-06T10:15:08,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741840_1016 (size=29761)
2024-12-06T10:15:08,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741840_1016 (size=29761)
2024-12-06T10:15:08,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/98bd992610ff4baa91dbea166026e4b6
2024-12-06T10:15:08,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/98bd992610ff4baa91dbea166026e4b6 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/98bd992610ff4baa91dbea166026e4b6
2024-12-06T10:15:08,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/98bd992610ff4baa91dbea166026e4b6, entries=23, sequenceid=37, filesize=29.1 K
2024-12-06T10:15:08,258 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 9f98efa95d36f7a34c151ce0056e8990 in 23ms, sequenceid=37, compaction requested=false
2024-12-06T10:15:08,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f98efa95d36f7a34c151ce0056e8990:
2024-12-06T10:15:08,259 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=41.3 K, sizeToCheck=16.0 K
2024-12-06T10:15:08,259 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-06T10:15:08,259 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/98bd992610ff4baa91dbea166026e4b6 because midkey is the same as first or last row
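The three DEBUG lines at 10:15:08,259 trace the split decision: the info store (41.3 K) exceeds the 16.0 K check size, but the split is vetoed because the candidate file's midkey equals its first or last row, leaving no usable split point. The sketch below mirrors that decision with hypothetical helper names; it is not the actual StoreUtils or RegionSplitPolicy code:

```java
import java.util.Arrays;
import java.util.Optional;

public class SplitCheckSketch {
  /**
   * A store larger than sizeToCheck "should split", but only if the candidate
   * file's midkey differs from both its first and last row key; otherwise the
   * log reports "midkey is the same as first or last row" and no split happens.
   */
  static Optional<byte[]> chooseSplitPoint(long storeSizeBytes, long sizeToCheckBytes,
                                           byte[] firstKey, byte[] midKey, byte[] lastKey) {
    if (storeSizeBytes <= sizeToCheckBytes) {
      return Optional.empty();                       // not big enough yet
    }
    if (Arrays.equals(midKey, firstKey) || Arrays.equals(midKey, lastKey)) {
      return Optional.empty();                       // no usable split point
    }
    return Optional.of(midKey);
  }

  public static void main(String[] args) {
    // ~41.3 K and 16.0 K from the log, expressed in bytes; keys are hypothetical.
    byte[] first = "row0001".getBytes();
    byte[] last  = "row0030".getBytes();
    Optional<byte[]> p = chooseSplitPoint(42_291, 16_384, first, first /* midkey == first */, last);
    System.out.println("split point present: " + p.isPresent());   // false, matching the log
  }
}
```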
2024-12-06T10:15:10,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 9f98efa95d36f7a34c151ce0056e8990
2024-12-06T10:15:10,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f98efa95d36f7a34c151ce0056e8990 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-06T10:15:10,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/fd66bca981a346cd99112c94f6e677d6 is 1080, key is row0031/info:/1733480108235/Put/seqid=0
2024-12-06T10:15:10,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741841_1017 (size=12509)
2024-12-06T10:15:10,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741841_1017 (size=12509)
2024-12-06T10:15:10,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/fd66bca981a346cd99112c94f6e677d6
2024-12-06T10:15:10,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/fd66bca981a346cd99112c94f6e677d6 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd66bca981a346cd99112c94f6e677d6
2024-12-06T10:15:10,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd66bca981a346cd99112c94f6e677d6, entries=7, sequenceid=47, filesize=12.2 K
2024-12-06T10:15:10,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for 9f98efa95d36f7a34c151ce0056e8990 in 25ms, sequenceid=47, compaction requested=true
2024-12-06T10:15:10,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f98efa95d36f7a34c151ce0056e8990:
2024-12-06T10:15:10,269 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=53.5 K, sizeToCheck=16.0 K
2024-12-06T10:15:10,269 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-06T10:15:10,269 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/98bd992610ff4baa91dbea166026e4b6 because midkey is the same as first or last row
2024-12-06T10:15:10,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f98efa95d36f7a34c151ce0056e8990:info, priority=-2147483648, current under compaction store size is 1
2024-12-06T10:15:10,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T10:15:10,270 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-06T10:15:10,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 9f98efa95d36f7a34c151ce0056e8990
2024-12-06T10:15:10,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f98efa95d36f7a34c151ce0056e8990 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB
2024-12-06T10:15:10,271 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-06T10:15:10,271 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1540): 9f98efa95d36f7a34c151ce0056e8990/info is initiating minor compaction (all files)
2024-12-06T10:15:10,271 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f98efa95d36f7a34c151ce0056e8990/info in TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.
2024-12-06T10:15:10,271 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/42f9915ee8874c7b9daaeb312b14ea15, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/98bd992610ff4baa91dbea166026e4b6, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd66bca981a346cd99112c94f6e677d6] into tmpdir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp, totalSize=53.5 K
2024-12-06T10:15:10,272 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42f9915ee8874c7b9daaeb312b14ea15, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733480098115
2024-12-06T10:15:10,272 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98bd992610ff4baa91dbea166026e4b6, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733480098123
2024-12-06T10:15:10,273 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd66bca981a346cd99112c94f6e677d6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733480108235
2024-12-06T10:15:10,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/b3041a17767d46e894307919062f7848 is 1080, key is row0038/info:/1733480110244/Put/seqid=0
2024-12-06T10:15:10,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741842_1018 (size=27607)
2024-12-06T10:15:10,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741842_1018 (size=27607)
2024-12-06T10:15:10,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=71 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/b3041a17767d46e894307919062f7848
2024-12-06T10:15:10,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/b3041a17767d46e894307919062f7848 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/b3041a17767d46e894307919062f7848
2024-12-06T10:15:10,298 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f98efa95d36f7a34c151ce0056e8990#info#compaction#42 average throughput is 9.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-06T10:15:10,299 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/156281b0c9304169a4006470836fe05c is 1080, key is row0001/info:/1733480098115/Put/seqid=0
2024-12-06T10:15:10,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/b3041a17767d46e894307919062f7848, entries=21, sequenceid=71, filesize=27.0 K
2024-12-06T10:15:10,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=6.30 KB/6456 for 9f98efa95d36f7a34c151ce0056e8990 in 32ms, sequenceid=71, compaction requested=false
2024-12-06T10:15:10,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f98efa95d36f7a34c151ce0056e8990:
2024-12-06T10:15:10,302 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=80.5 K, sizeToCheck=16.0 K
2024-12-06T10:15:10,302 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-06T10:15:10,302 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/98bd992610ff4baa91dbea166026e4b6 because midkey is the same as first or last row
2024-12-06T10:15:10,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741843_1019 (size=44978)
2024-12-06T10:15:10,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741843_1019 (size=44978)
2024-12-06T10:15:10,311 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/156281b0c9304169a4006470836fe05c as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/156281b0c9304169a4006470836fe05c
2024-12-06T10:15:10,316 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f98efa95d36f7a34c151ce0056e8990/info of 9f98efa95d36f7a34c151ce0056e8990 into 156281b0c9304169a4006470836fe05c(size=43.9 K), total size for store is 70.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-06T10:15:10,316 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f98efa95d36f7a34c151ce0056e8990:
2024-12-06T10:15:10,316 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990., storeName=9f98efa95d36f7a34c151ce0056e8990/info, priority=13, startTime=1733480110269; duration=0sec
2024-12-06T10:15:10,316 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=70.9 K, sizeToCheck=16.0 K
2024-12-06T10:15:10,316 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-06T10:15:10,316 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/156281b0c9304169a4006470836fe05c because midkey is the same as first or last row
2024-12-06T10:15:10,316 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T10:15:10,316 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f98efa95d36f7a34c151ce0056e8990:info
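The compaction above rewrote the three flush outputs (12.2 K + 29.1 K + 12.2 K, the "3 files of size 54779" bytes) into a single 43.9 K file. The "in ratio" wording in the ExploringCompactionPolicy line refers to a check that no candidate file dwarfs the others; a hedged sketch of that style of check, assuming the commonly cited default ratio of 1.2 and simplified relative to the real policy, is shown below:

```java
import java.util.List;

public class CompactionRatioSketch {
  /**
   * A selection of store files is treated as "in ratio" when every file is no
   * larger than ratio * (sum of the other files). Selections dominated by one
   * oversized file would make the rewrite mostly wasted I/O, so they are skipped.
   */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes from the log: 12509 + 29761 + 12509 bytes = 54779 bytes total.
    List<Long> candidates = List.of(12_509L, 29_761L, 12_509L);
    System.out.println(filesInRatio(candidates, 1.2));   // prints true, matching "1 in ratio" above
  }
}
```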
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:11,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:12,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,280 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f98efa95d36f7a34c151ce0056e8990 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T10:15:12,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/fd94146835bd47209511ff1f1c3deef5 is 1080, key is row0059/info:/1733480110271/Put/seqid=0 2024-12-06T10:15:12,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741844_1020 (size=12509) 2024-12-06T10:15:12,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741844_1020 (size=12509) 2024-12-06T10:15:12,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/fd94146835bd47209511ff1f1c3deef5 2024-12-06T10:15:12,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/fd94146835bd47209511ff1f1c3deef5 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd94146835bd47209511ff1f1c3deef5 2024-12-06T10:15:12,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd94146835bd47209511ff1f1c3deef5, entries=7, sequenceid=82, filesize=12.2 K 2024-12-06T10:15:12,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 9f98efa95d36f7a34c151ce0056e8990 in 24ms, sequenceid=82, compaction requested=true 2024-12-06T10:15:12,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f98efa95d36f7a34c151ce0056e8990: 2024-12-06T10:15:12,304 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=83.1 K, sizeToCheck=16.0 K 2024-12-06T10:15:12,304 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T10:15:12,304 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/156281b0c9304169a4006470836fe05c because midkey is the same as first or last row 2024-12-06T10:15:12,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f98efa95d36f7a34c151ce0056e8990:info, 
priority=-2147483648, current under compaction store size is 1 2024-12-06T10:15:12,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:15:12,305 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:15:12,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f98efa95d36f7a34c151ce0056e8990 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-12-06T10:15:12,306 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:15:12,306 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1540): 9f98efa95d36f7a34c151ce0056e8990/info is initiating minor compaction (all files) 2024-12-06T10:15:12,306 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f98efa95d36f7a34c151ce0056e8990/info in TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 2024-12-06T10:15:12,306 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/156281b0c9304169a4006470836fe05c, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/b3041a17767d46e894307919062f7848, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd94146835bd47209511ff1f1c3deef5] into tmpdir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp, totalSize=83.1 K 2024-12-06T10:15:12,306 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 156281b0c9304169a4006470836fe05c, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733480098115 2024-12-06T10:15:12,307 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3041a17767d46e894307919062f7848, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=71, earliestPutTs=1733480110244 2024-12-06T10:15:12,307 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd94146835bd47209511ff1f1c3deef5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733480110271 2024-12-06T10:15:12,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/fc5a4b47fe2a4ed6a4a5a7f5ff9584e5 is 1080, key 
is row0066/info:/1733480112281/Put/seqid=0 2024-12-06T10:15:12,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741845_1021 (size=26530) 2024-12-06T10:15:12,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741845_1021 (size=26530) 2024-12-06T10:15:12,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/fc5a4b47fe2a4ed6a4a5a7f5ff9584e5 2024-12-06T10:15:12,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9f98efa95d36f7a34c151ce0056e8990, server=552d6a33fa09,33905,1733480086872 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:15:12,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:36200 deadline: 1733480122320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9f98efa95d36f7a34c151ce0056e8990, server=552d6a33fa09,33905,1733480086872 2024-12-06T10:15:12,324 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f98efa95d36f7a34c151ce0056e8990#info#compaction#45 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:15:12,325 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/1b1a1952f65143268ab7945362a46a0b is 1080, key is row0001/info:/1733480098115/Put/seqid=0 2024-12-06T10:15:12,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/fc5a4b47fe2a4ed6a4a5a7f5ff9584e5 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fc5a4b47fe2a4ed6a4a5a7f5ff9584e5 2024-12-06T10:15:12,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741846_1022 (size=75378) 2024-12-06T10:15:12,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741846_1022 (size=75378) 2024-12-06T10:15:12,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fc5a4b47fe2a4ed6a4a5a7f5ff9584e5, entries=20, sequenceid=105, filesize=25.9 K 2024-12-06T10:15:12,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=9.46 KB/9684 for 9f98efa95d36f7a34c151ce0056e8990 in 28ms, sequenceid=105, compaction requested=false 2024-12-06T10:15:12,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f98efa95d36f7a34c151ce0056e8990: 2024-12-06T10:15:12,333 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=109.0 K, sizeToCheck=16.0 K 2024-12-06T10:15:12,333 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T10:15:12,333 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/156281b0c9304169a4006470836fe05c because midkey is the same as first or last row 2024-12-06T10:15:12,336 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/1b1a1952f65143268ab7945362a46a0b as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/1b1a1952f65143268ab7945362a46a0b 2024-12-06T10:15:12,342 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f98efa95d36f7a34c151ce0056e8990/info of 9f98efa95d36f7a34c151ce0056e8990 into 1b1a1952f65143268ab7945362a46a0b(size=73.6 K), total size for store is 99.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:15:12,342 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f98efa95d36f7a34c151ce0056e8990: 2024-12-06T10:15:12,342 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990., storeName=9f98efa95d36f7a34c151ce0056e8990/info, priority=13, startTime=1733480112304; duration=0sec 2024-12-06T10:15:12,342 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=99.5 K, sizeToCheck=16.0 K 2024-12-06T10:15:12,342 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T10:15:12,343 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:15:12,343 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:15:12,343 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f98efa95d36f7a34c151ce0056e8990:info 2024-12-06T10:15:12,345 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34115 {}] assignment.AssignmentManager(1346): Split request from 552d6a33fa09,33905,1733480086872, parent={ENCODED => 9f98efa95d36f7a34c151ce0056e8990, NAME => 'TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-06T10:15:12,349 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34115 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=552d6a33fa09,33905,1733480086872 2024-12-06T10:15:12,353 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34115 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=9f98efa95d36f7a34c151ce0056e8990, daughterA=ed4e3188ab7b31acc3d28a07be561675, daughterB=8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:12,354 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=9f98efa95d36f7a34c151ce0056e8990, daughterA=ed4e3188ab7b31acc3d28a07be561675, daughterB=8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:12,354 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=9f98efa95d36f7a34c151ce0056e8990, daughterA=ed4e3188ab7b31acc3d28a07be561675, daughterB=8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:12,354 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=9f98efa95d36f7a34c151ce0056e8990, daughterA=ed4e3188ab7b31acc3d28a07be561675, 
daughterB=8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:12,360 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f98efa95d36f7a34c151ce0056e8990, UNASSIGN}] 2024-12-06T10:15:12,361 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f98efa95d36f7a34c151ce0056e8990, UNASSIGN 2024-12-06T10:15:12,362 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=9f98efa95d36f7a34c151ce0056e8990, regionState=CLOSING, regionLocation=552d6a33fa09,33905,1733480086872 2024-12-06T10:15:12,363 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-06T10:15:12,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE; CloseRegionProcedure 9f98efa95d36f7a34c151ce0056e8990, server=552d6a33fa09,33905,1733480086872}] 2024-12-06T10:15:12,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:15:12,518 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33905,1733480086872 2024-12-06T10:15:12,520 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(124): Close 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,520 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-06T10:15:12,521 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1681): Closing 9f98efa95d36f7a34c151ce0056e8990, disabling compactions & flushes 2024-12-06T10:15:12,521 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 2024-12-06T10:15:12,521 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 2024-12-06T10:15:12,521 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. after waiting 0 ms 2024-12-06T10:15:12,521 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 
2024-12-06T10:15:12,521 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(2837): Flushing 9f98efa95d36f7a34c151ce0056e8990 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-06T10:15:12,525 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/e0ceb309a88b4f958968a2a4eb849c94 is 1080, key is row0086/info:/1733480112306/Put/seqid=0 2024-12-06T10:15:12,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741847_1023 (size=14663) 2024-12-06T10:15:12,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741847_1023 (size=14663) 2024-12-06T10:15:12,531 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/e0ceb309a88b4f958968a2a4eb849c94 2024-12-06T10:15:12,537 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/.tmp/info/e0ceb309a88b4f958968a2a4eb849c94 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/e0ceb309a88b4f958968a2a4eb849c94 2024-12-06T10:15:12,542 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/e0ceb309a88b4f958968a2a4eb849c94, entries=9, sequenceid=118, filesize=14.3 K 2024-12-06T10:15:12,543 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 9f98efa95d36f7a34c151ce0056e8990 in 22ms, sequenceid=118, compaction requested=true 2024-12-06T10:15:12,544 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/42f9915ee8874c7b9daaeb312b14ea15, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/98bd992610ff4baa91dbea166026e4b6, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/156281b0c9304169a4006470836fe05c, 
hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd66bca981a346cd99112c94f6e677d6, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/b3041a17767d46e894307919062f7848, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd94146835bd47209511ff1f1c3deef5] to archive 2024-12-06T10:15:12,545 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T10:15:12,547 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/42f9915ee8874c7b9daaeb312b14ea15 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/42f9915ee8874c7b9daaeb312b14ea15 2024-12-06T10:15:12,548 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/98bd992610ff4baa91dbea166026e4b6 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/98bd992610ff4baa91dbea166026e4b6 2024-12-06T10:15:12,549 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/156281b0c9304169a4006470836fe05c to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/156281b0c9304169a4006470836fe05c 2024-12-06T10:15:12,550 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd66bca981a346cd99112c94f6e677d6 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd66bca981a346cd99112c94f6e677d6 2024-12-06T10:15:12,551 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/b3041a17767d46e894307919062f7848 to 
hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/b3041a17767d46e894307919062f7848 2024-12-06T10:15:12,552 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd94146835bd47209511ff1f1c3deef5 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fd94146835bd47209511ff1f1c3deef5 2024-12-06T10:15:12,557 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=1 2024-12-06T10:15:12,557 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. 2024-12-06T10:15:12,557 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1635): Region close journal for 9f98efa95d36f7a34c151ce0056e8990: 2024-12-06T10:15:12,559 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(170): Closed 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,560 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=9f98efa95d36f7a34c151ce0056e8990, regionState=CLOSED 2024-12-06T10:15:12,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=13 2024-12-06T10:15:12,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=13, state=SUCCESS; CloseRegionProcedure 9f98efa95d36f7a34c151ce0056e8990, server=552d6a33fa09,33905,1733480086872 in 198 msec 2024-12-06T10:15:12,565 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-06T10:15:12,565 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f98efa95d36f7a34c151ce0056e8990, UNASSIGN in 203 msec 2024-12-06T10:15:12,585 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:15:12,587 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=12 splitting 3 storefiles, region=9f98efa95d36f7a34c151ce0056e8990, threads=3 2024-12-06T10:15:12,588 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/1b1a1952f65143268ab7945362a46a0b for region: 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,588 DEBUG [StoreFileSplitter-pool-1 {}] 
assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/e0ceb309a88b4f958968a2a4eb849c94 for region: 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,588 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fc5a4b47fe2a4ed6a4a5a7f5ff9584e5 for region: 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,598 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fc5a4b47fe2a4ed6a4a5a7f5ff9584e5, top=true 2024-12-06T10:15:12,599 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/e0ceb309a88b4f958968a2a4eb849c94, top=true 2024-12-06T10:15:12,603 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-fc5a4b47fe2a4ed6a4a5a7f5ff9584e5 for child: 8b11b268c80f36c7f95e9691eadcddc2, parent: 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,603 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/fc5a4b47fe2a4ed6a4a5a7f5ff9584e5 for region: 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,603 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-e0ceb309a88b4f958968a2a4eb849c94 for child: 8b11b268c80f36c7f95e9691eadcddc2, parent: 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,603 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/e0ceb309a88b4f958968a2a4eb849c94 for region: 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741848_1024 (size=27) 2024-12-06T10:15:12,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741848_1024 (size=27) 2024-12-06T10:15:12,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 
is added to blk_1073741849_1025 (size=27) 2024-12-06T10:15:12,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741849_1025 (size=27) 2024-12-06T10:15:12,618 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/1b1a1952f65143268ab7945362a46a0b for region: 9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:15:12,618 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=12 split storefiles for region 9f98efa95d36f7a34c151ce0056e8990 Daughter A: [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990] storefiles, Daughter B: [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-e0ceb309a88b4f958968a2a4eb849c94, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-fc5a4b47fe2a4ed6a4a5a7f5ff9584e5] storefiles. 
2024-12-06T10:15:12,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741850_1026 (size=71) 2024-12-06T10:15:12,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741850_1026 (size=71) 2024-12-06T10:15:12,632 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:15:12,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741851_1027 (size=71) 2024-12-06T10:15:12,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741851_1027 (size=71) 2024-12-06T10:15:12,651 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:15:12,660 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-12-06T10:15:12,662 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-12-06T10:15:12,664 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733480112664"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733480112664"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733480112664"}]},"ts":"1733480112664"} 2024-12-06T10:15:12,664 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733480112664"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480112664"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733480112664"}]},"ts":"1733480112664"} 2024-12-06T10:15:12,664 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733480112664"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480112664"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733480112664"}]},"ts":"1733480112664"} 2024-12-06T10:15:12,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33905 {}] regionserver.HRegion(8581): Flush requested on 1588230740 2024-12-06T10:15:12,694 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-06T10:15:12,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=4.75 KB heapSize=8.29 KB 2024-12-06T10:15:12,699 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ed4e3188ab7b31acc3d28a07be561675, ASSIGN}, {pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b11b268c80f36c7f95e9691eadcddc2, ASSIGN}] 2024-12-06T10:15:12,700 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ed4e3188ab7b31acc3d28a07be561675, ASSIGN 2024-12-06T10:15:12,700 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b11b268c80f36c7f95e9691eadcddc2, ASSIGN 2024-12-06T10:15:12,701 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ed4e3188ab7b31acc3d28a07be561675, ASSIGN; state=SPLITTING_NEW, location=552d6a33fa09,33905,1733480086872; forceNewPlan=false, retain=false 2024-12-06T10:15:12,701 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b11b268c80f36c7f95e9691eadcddc2, ASSIGN; state=SPLITTING_NEW, location=552d6a33fa09,33905,1733480086872; forceNewPlan=false, retain=false 2024-12-06T10:15:12,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/.tmp/info/4966ac108ba0417f9310bdcc053256be is 193, key is TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2./info:regioninfo/1733480112664/Put/seqid=0 2024-12-06T10:15:12,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741852_1028 (size=9423) 2024-12-06T10:15:12,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741852_1028 (size=9423) 2024-12-06T10:15:12,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.54 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/.tmp/info/4966ac108ba0417f9310bdcc053256be 2024-12-06T10:15:12,745 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/.tmp/table/67578ba0cb56489b82f5c00101a1eaef is 65, key is TestLogRolling-testLogRolling/table:state/1733480088462/Put/seqid=0 2024-12-06T10:15:12,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43767 is added to blk_1073741853_1029 (size=5412) 2024-12-06T10:15:12,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741853_1029 (size=5412) 2024-12-06T10:15:12,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=216 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/.tmp/table/67578ba0cb56489b82f5c00101a1eaef 2024-12-06T10:15:12,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/.tmp/info/4966ac108ba0417f9310bdcc053256be as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/info/4966ac108ba0417f9310bdcc053256be 2024-12-06T10:15:12,763 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/info/4966ac108ba0417f9310bdcc053256be, entries=29, sequenceid=17, filesize=9.2 K 2024-12-06T10:15:12,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/.tmp/table/67578ba0cb56489b82f5c00101a1eaef as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/table/67578ba0cb56489b82f5c00101a1eaef 2024-12-06T10:15:12,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/table/67578ba0cb56489b82f5c00101a1eaef, entries=4, sequenceid=17, filesize=5.3 K 2024-12-06T10:15:12,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~4.75 KB/4869, heapSize ~8.01 KB/8200, currentSize=0 B/0 for 1588230740 in 77ms, sequenceid=17, compaction requested=false 2024-12-06T10:15:12,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-06T10:15:12,789 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T10:15:12,790 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,790 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,790 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,791 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,791 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,791 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:12,851 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=8b11b268c80f36c7f95e9691eadcddc2, regionState=OPENING, regionLocation=552d6a33fa09,33905,1733480086872 2024-12-06T10:15:12,851 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=ed4e3188ab7b31acc3d28a07be561675, regionState=OPENING, regionLocation=552d6a33fa09,33905,1733480086872 2024-12-06T10:15:12,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; OpenRegionProcedure 8b11b268c80f36c7f95e9691eadcddc2, server=552d6a33fa09,33905,1733480086872}] 2024-12-06T10:15:12,855 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=15, state=RUNNABLE; OpenRegionProcedure ed4e3188ab7b31acc3d28a07be561675, server=552d6a33fa09,33905,1733480086872}] 2024-12-06T10:15:13,006 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,33905,1733480086872 2024-12-06T10:15:13,010 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 2024-12-06T10:15:13,010 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7285): Opening region: {ENCODED => 8b11b268c80f36c7f95e9691eadcddc2, NAME => 'TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-06T10:15:13,010 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:13,011 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:15:13,011 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7327): checking encryption for 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:13,011 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7330): checking classloading for 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:13,012 INFO [StoreOpener-8b11b268c80f36c7f95e9691eadcddc2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:13,013 INFO [StoreOpener-8b11b268c80f36c7f95e9691eadcddc2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8b11b268c80f36c7f95e9691eadcddc2 columnFamilyName info 2024-12-06T10:15:13,013 DEBUG [StoreOpener-8b11b268c80f36c7f95e9691eadcddc2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:15:13,023 DEBUG [StoreOpener-8b11b268c80f36c7f95e9691eadcddc2-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990->hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/1b1a1952f65143268ab7945362a46a0b-top 2024-12-06T10:15:13,028 DEBUG [StoreOpener-8b11b268c80f36c7f95e9691eadcddc2-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-e0ceb309a88b4f958968a2a4eb849c94 2024-12-06T10:15:13,032 DEBUG [StoreOpener-8b11b268c80f36c7f95e9691eadcddc2-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-fc5a4b47fe2a4ed6a4a5a7f5ff9584e5 2024-12-06T10:15:13,032 INFO [StoreOpener-8b11b268c80f36c7f95e9691eadcddc2-1 {}] regionserver.HStore(327): Store=8b11b268c80f36c7f95e9691eadcddc2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:15:13,033 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:13,034 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:13,035 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1085): writing seq id for 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:13,036 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1102): Opened 8b11b268c80f36c7f95e9691eadcddc2; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835702, jitterRate=0.06265032291412354}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:15:13,037 DEBUG 
[RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1001): Region open journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:15:13,037 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2., pid=17, masterSystemTime=1733480113006 2024-12-06T10:15:13,038 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(403): Add compact mark for store 8b11b268c80f36c7f95e9691eadcddc2:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:15:13,038 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:15:13,038 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:15:13,039 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 2024-12-06T10:15:13,039 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1540): 8b11b268c80f36c7f95e9691eadcddc2/info is initiating minor compaction (all files) 2024-12-06T10:15:13,039 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8b11b268c80f36c7f95e9691eadcddc2/info in TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 2024-12-06T10:15:13,039 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990->hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/1b1a1952f65143268ab7945362a46a0b-top, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-fc5a4b47fe2a4ed6a4a5a7f5ff9584e5, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-e0ceb309a88b4f958968a2a4eb849c94] into tmpdir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp, totalSize=113.8 K 2024-12-06T10:15:13,039 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 
2024-12-06T10:15:13,039 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 2024-12-06T10:15:13,039 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675. 2024-12-06T10:15:13,039 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7285): Opening region: {ENCODED => ed4e3188ab7b31acc3d28a07be561675, NAME => 'TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-06T10:15:13,040 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733480098115 2024-12-06T10:15:13,040 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling ed4e3188ab7b31acc3d28a07be561675 2024-12-06T10:15:13,040 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:15:13,040 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7327): checking encryption for ed4e3188ab7b31acc3d28a07be561675 2024-12-06T10:15:13,040 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7330): checking classloading for ed4e3188ab7b31acc3d28a07be561675 2024-12-06T10:15:13,040 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=8b11b268c80f36c7f95e9691eadcddc2, regionState=OPEN, openSeqNum=122, regionLocation=552d6a33fa09,33905,1733480086872 2024-12-06T10:15:13,040 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-fc5a4b47fe2a4ed6a4a5a7f5ff9584e5, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1733480112281 2024-12-06T10:15:13,041 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-e0ceb309a88b4f958968a2a4eb849c94, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733480112306 2024-12-06T10:15:13,041 INFO [StoreOpener-ed4e3188ab7b31acc3d28a07be561675-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ed4e3188ab7b31acc3d28a07be561675 2024-12-06T10:15:13,042 INFO [StoreOpener-ed4e3188ab7b31acc3d28a07be561675-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed4e3188ab7b31acc3d28a07be561675 columnFamilyName info 2024-12-06T10:15:13,042 DEBUG [StoreOpener-ed4e3188ab7b31acc3d28a07be561675-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:15:13,044 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-06T10:15:13,044 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; OpenRegionProcedure 8b11b268c80f36c7f95e9691eadcddc2, server=552d6a33fa09,33905,1733480086872 in 188 msec 2024-12-06T10:15:13,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b11b268c80f36c7f95e9691eadcddc2, ASSIGN in 345 msec 2024-12-06T10:15:13,051 DEBUG [StoreOpener-ed4e3188ab7b31acc3d28a07be561675-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990->hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/1b1a1952f65143268ab7945362a46a0b-bottom 2024-12-06T10:15:13,051 INFO [StoreOpener-ed4e3188ab7b31acc3d28a07be561675-1 {}] regionserver.HStore(327): Store=ed4e3188ab7b31acc3d28a07be561675/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:15:13,052 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675 2024-12-06T10:15:13,053 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675 2024-12-06T10:15:13,055 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1085): writing seq id for ed4e3188ab7b31acc3d28a07be561675 2024-12-06T10:15:13,056 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1102): Opened ed4e3188ab7b31acc3d28a07be561675; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695569, 
jitterRate=-0.11553941667079926}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:15:13,056 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1001): Region open journal for ed4e3188ab7b31acc3d28a07be561675: 2024-12-06T10:15:13,057 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675., pid=18, masterSystemTime=1733480113006 2024-12-06T10:15:13,057 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(403): Add compact mark for store ed4e3188ab7b31acc3d28a07be561675:info, priority=-2147483648, current under compaction store size is 2 2024-12-06T10:15:13,057 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:15:13,057 DEBUG [RS:0;552d6a33fa09:33905-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-06T10:15:13,058 INFO [RS:0;552d6a33fa09:33905-longCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675. 2024-12-06T10:15:13,058 DEBUG [RS:0;552d6a33fa09:33905-longCompactions-0 {}] regionserver.HStore(1540): ed4e3188ab7b31acc3d28a07be561675/info is initiating minor compaction (all files) 2024-12-06T10:15:13,058 INFO [RS:0;552d6a33fa09:33905-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed4e3188ab7b31acc3d28a07be561675/info in TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675. 2024-12-06T10:15:13,058 INFO [RS:0;552d6a33fa09:33905-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990->hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/1b1a1952f65143268ab7945362a46a0b-bottom] into tmpdir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/.tmp, totalSize=73.6 K 2024-12-06T10:15:13,059 DEBUG [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675. 2024-12-06T10:15:13,059 INFO [RS_OPEN_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675. 
2024-12-06T10:15:13,059 DEBUG [RS:0;552d6a33fa09:33905-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733480098115 2024-12-06T10:15:13,059 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=ed4e3188ab7b31acc3d28a07be561675, regionState=OPEN, openSeqNum=122, regionLocation=552d6a33fa09,33905,1733480086872 2024-12-06T10:15:13,063 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=15 2024-12-06T10:15:13,063 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=15, state=SUCCESS; OpenRegionProcedure ed4e3188ab7b31acc3d28a07be561675, server=552d6a33fa09,33905,1733480086872 in 207 msec 2024-12-06T10:15:13,064 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8b11b268c80f36c7f95e9691eadcddc2#info#compaction#49 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:15:13,065 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/10ab0d1e18d64fdfb1ea28138c502ee8 is 1080, key is row0062/info:/1733480110275/Put/seqid=0 2024-12-06T10:15:13,065 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=12 2024-12-06T10:15:13,065 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ed4e3188ab7b31acc3d28a07be561675, ASSIGN in 364 msec 2024-12-06T10:15:13,067 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=9f98efa95d36f7a34c151ce0056e8990, daughterA=ed4e3188ab7b31acc3d28a07be561675, daughterB=8b11b268c80f36c7f95e9691eadcddc2 in 716 msec 2024-12-06T10:15:13,077 INFO [RS:0;552d6a33fa09:33905-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed4e3188ab7b31acc3d28a07be561675#info#compaction#50 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:15:13,078 DEBUG [RS:0;552d6a33fa09:33905-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/.tmp/info/a254547fa1ce4ed3b712e823a7ae5036 is 1080, key is row0001/info:/1733480098115/Put/seqid=0 2024-12-06T10:15:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741854_1030 (size=40830) 2024-12-06T10:15:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741854_1030 (size=40830) 2024-12-06T10:15:13,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741855_1031 (size=70862) 2024-12-06T10:15:13,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741855_1031 (size=70862) 2024-12-06T10:15:13,087 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/10ab0d1e18d64fdfb1ea28138c502ee8 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/10ab0d1e18d64fdfb1ea28138c502ee8 2024-12-06T10:15:13,089 DEBUG [RS:0;552d6a33fa09:33905-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/.tmp/info/a254547fa1ce4ed3b712e823a7ae5036 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/info/a254547fa1ce4ed3b712e823a7ae5036 2024-12-06T10:15:13,093 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8b11b268c80f36c7f95e9691eadcddc2/info of 8b11b268c80f36c7f95e9691eadcddc2 into 10ab0d1e18d64fdfb1ea28138c502ee8(size=39.9 K), total size for store is 39.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T10:15:13,093 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:15:13,093 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2., storeName=8b11b268c80f36c7f95e9691eadcddc2/info, priority=13, startTime=1733480113037; duration=0sec 2024-12-06T10:15:13,093 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:15:13,093 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8b11b268c80f36c7f95e9691eadcddc2:info 2024-12-06T10:15:13,096 INFO [RS:0;552d6a33fa09:33905-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 1 (all) file(s) in ed4e3188ab7b31acc3d28a07be561675/info of ed4e3188ab7b31acc3d28a07be561675 into a254547fa1ce4ed3b712e823a7ae5036(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:15:13,096 DEBUG [RS:0;552d6a33fa09:33905-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed4e3188ab7b31acc3d28a07be561675: 2024-12-06T10:15:13,096 INFO [RS:0;552d6a33fa09:33905-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675., storeName=ed4e3188ab7b31acc3d28a07be561675/info, priority=15, startTime=1733480113057; duration=0sec 2024-12-06T10:15:13,096 DEBUG [RS:0;552d6a33fa09:33905-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:15:13,096 DEBUG [RS:0;552d6a33fa09:33905-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed4e3188ab7b31acc3d28a07be561675:info 2024-12-06T10:15:13,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:14,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:15,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:16,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:16,805 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T10:15:17,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:18,060 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T10:15:18,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,085 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T10:15:18,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:19,104 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:15:19,106 INFO [RS-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:15:19,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:20,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:21,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:22,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:36200 deadline: 1733480132330, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733480088104.9f98efa95d36f7a34c151ce0056e8990. is not online on 552d6a33fa09,33905,1733480086872 2024-12-06T10:15:22,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:23,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:24,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-06T10:15:25,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:26,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:27,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:28,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:28,620 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-06T10:15:28,620 INFO [master/552d6a33fa09:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-06T10:15:29,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:30,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:31,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:32,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:32,604 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 14835
2024-12-06T10:15:33,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:34,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:35,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:36,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:37,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:38,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:39,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:40,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:41,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:42,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:43,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:44,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:44,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 8b11b268c80f36c7f95e9691eadcddc2
2024-12-06T10:15:44,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-06T10:15:44,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/2a66d7bb7f5c4928b5b4baa45fc56c75 is 1080, key is row0095/info:/1733480142421/Put/seqid=0
2024-12-06T10:15:44,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741856_1032 (size=12513)
2024-12-06T10:15:44,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741856_1032 (size=12513)
2024-12-06T10:15:44,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/2a66d7bb7f5c4928b5b4baa45fc56c75
2024-12-06T10:15:44,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/2a66d7bb7f5c4928b5b4baa45fc56c75 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/2a66d7bb7f5c4928b5b4baa45fc56c75
2024-12-06T10:15:44,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/2a66d7bb7f5c4928b5b4baa45fc56c75, entries=7, sequenceid=132, filesize=12.2 K
2024-12-06T10:15:44,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 8b11b268c80f36c7f95e9691eadcddc2 in 24ms, sequenceid=132, compaction requested=false
2024-12-06T10:15:44,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2:
2024-12-06T10:15:44,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 8b11b268c80f36c7f95e9691eadcddc2
2024-12-06T10:15:44,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-06T10:15:44,459 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/d534f237195c4fd3a0b3931ce375a07e is 1080, key is row0102/info:/1733480144431/Put/seqid=0
2024-12-06T10:15:44,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741857_1033 (size=29784)
2024-12-06T10:15:44,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741857_1033 (size=29784)
2024-12-06T10:15:44,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/d534f237195c4fd3a0b3931ce375a07e
2024-12-06T10:15:44,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/d534f237195c4fd3a0b3931ce375a07e as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/d534f237195c4fd3a0b3931ce375a07e
2024-12-06T10:15:44,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/d534f237195c4fd3a0b3931ce375a07e, entries=23, sequenceid=158, filesize=29.1 K
2024-12-06T10:15:44,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=4.20 KB/4304 for 8b11b268c80f36c7f95e9691eadcddc2 in 20ms, sequenceid=158, compaction requested=true
2024-12-06T10:15:44,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2:
2024-12-06T10:15:44,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8b11b268c80f36c7f95e9691eadcddc2:info, priority=-2147483648, current under compaction store size is 1
2024-12-06T10:15:44,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T10:15:44,476 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-06T10:15:44,477 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 83127 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-06T10:15:44,477 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1540): 8b11b268c80f36c7f95e9691eadcddc2/info is initiating minor compaction (all files)
2024-12-06T10:15:44,477 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8b11b268c80f36c7f95e9691eadcddc2/info in TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.
2024-12-06T10:15:44,477 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/10ab0d1e18d64fdfb1ea28138c502ee8, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/2a66d7bb7f5c4928b5b4baa45fc56c75, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/d534f237195c4fd3a0b3931ce375a07e] into tmpdir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp, totalSize=81.2 K
2024-12-06T10:15:44,477 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10ab0d1e18d64fdfb1ea28138c502ee8, keycount=33, bloomtype=ROW, size=39.9 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733480110275
2024-12-06T10:15:44,477 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a66d7bb7f5c4928b5b4baa45fc56c75, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733480142421
2024-12-06T10:15:44,478 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting d534f237195c4fd3a0b3931ce375a07e, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733480144431
2024-12-06T10:15:44,488 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8b11b268c80f36c7f95e9691eadcddc2#info#compaction#53 average throughput is 64.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-06T10:15:44,489 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/2834823512fe4b7c92de836f2b908924 is 1080, key is row0062/info:/1733480110275/Put/seqid=0
2024-12-06T10:15:44,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741858_1034 (size=73410)
2024-12-06T10:15:44,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741858_1034 (size=73410)
2024-12-06T10:15:44,498 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/2834823512fe4b7c92de836f2b908924 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/2834823512fe4b7c92de836f2b908924
2024-12-06T10:15:44,503 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8b11b268c80f36c7f95e9691eadcddc2/info of 8b11b268c80f36c7f95e9691eadcddc2 into 2834823512fe4b7c92de836f2b908924(size=71.7 K), total size for store is 71.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-06T10:15:44,503 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8b11b268c80f36c7f95e9691eadcddc2:
2024-12-06T10:15:44,503 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2., storeName=8b11b268c80f36c7f95e9691eadcddc2/info, priority=13, startTime=1733480144476; duration=0sec
2024-12-06T10:15:44,503 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T10:15:44,503 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8b11b268c80f36c7f95e9691eadcddc2:info
2024-12-06T10:15:45,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
2024-12-06T10:15:46,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta
11 more 2024-12-06T10:15:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:46,463 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T10:15:46,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/b61b2e9854654d8eb4d13e542186f54c is 1080, key is row0125/info:/1733480144455/Put/seqid=0 2024-12-06T10:15:46,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741859_1035 (size=12516) 2024-12-06T10:15:46,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741859_1035 (size=12516) 2024-12-06T10:15:46,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/b61b2e9854654d8eb4d13e542186f54c 2024-12-06T10:15:46,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/b61b2e9854654d8eb4d13e542186f54c as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b61b2e9854654d8eb4d13e542186f54c 2024-12-06T10:15:46,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b61b2e9854654d8eb4d13e542186f54c, entries=7, sequenceid=169, filesize=12.2 K 2024-12-06T10:15:46,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 8b11b268c80f36c7f95e9691eadcddc2 in 24ms, sequenceid=169, compaction requested=false 2024-12-06T10:15:46,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:15:46,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:46,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-12-06T10:15:46,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/0c93c624496a404b8ce8c84eac3a3e76 is 1080, key is row0132/info:/1733480146464/Put/seqid=0 2024-12-06T10:15:46,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to 
blk_1073741860_1036 (size=28706) 2024-12-06T10:15:46,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741860_1036 (size=28706) 2024-12-06T10:15:46,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/0c93c624496a404b8ce8c84eac3a3e76 2024-12-06T10:15:46,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/0c93c624496a404b8ce8c84eac3a3e76 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/0c93c624496a404b8ce8c84eac3a3e76 2024-12-06T10:15:46,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/0c93c624496a404b8ce8c84eac3a3e76, entries=22, sequenceid=194, filesize=28.0 K 2024-12-06T10:15:46,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=7.36 KB/7532 for 8b11b268c80f36c7f95e9691eadcddc2 in 21ms, sequenceid=194, compaction requested=true 2024-12-06T10:15:46,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:15:46,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8b11b268c80f36c7f95e9691eadcddc2:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:15:46,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:15:46,509 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:15:46,510 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 114632 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:15:46,510 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1540): 8b11b268c80f36c7f95e9691eadcddc2/info is initiating minor compaction (all files) 2024-12-06T10:15:46,510 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8b11b268c80f36c7f95e9691eadcddc2/info in TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 
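The flush and compaction sequence recorded above was queued automatically by MemStoreFlusher and the CompactSplit thread once the store crossed its thresholds. For reference, a minimal sketch (not part of this test, and assuming a reachable cluster configured via hbase-site.xml on the classpath) of how the same operations could be requested explicitly through the HBase Admin API, using the table name from the log:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlushCompactSketch {
    public static void main(String[] args) throws IOException {
        // Picks up the cluster settings from hbase-site.xml; assumed to point at a running cluster.
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(table);        // write out the current memstore, as MemStoreFlusher.0 does above
            admin.majorCompact(table); // request a compaction of the table's store files
        }
    }
}

The compaction request in particular is handled asynchronously by the regionserver, so its completion only shows up in server-side messages like the HStore(1336) "Completed compaction" entries above.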
2024-12-06T10:15:46,510 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/2834823512fe4b7c92de836f2b908924, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b61b2e9854654d8eb4d13e542186f54c, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/0c93c624496a404b8ce8c84eac3a3e76] into tmpdir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp, totalSize=111.9 K 2024-12-06T10:15:46,510 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2834823512fe4b7c92de836f2b908924, keycount=63, bloomtype=ROW, size=71.7 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733480110275 2024-12-06T10:15:46,511 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting b61b2e9854654d8eb4d13e542186f54c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733480144455 2024-12-06T10:15:46,511 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c93c624496a404b8ce8c84eac3a3e76, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733480146464 2024-12-06T10:15:46,524 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8b11b268c80f36c7f95e9691eadcddc2#info#compaction#56 average throughput is 31.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:15:46,524 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/0aec1820fa6640a5b374b3090958da5e is 1080, key is row0062/info:/1733480110275/Put/seqid=0 2024-12-06T10:15:46,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741861_1037 (size=104782) 2024-12-06T10:15:46,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741861_1037 (size=104782) 2024-12-06T10:15:46,534 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/0aec1820fa6640a5b374b3090958da5e as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/0aec1820fa6640a5b374b3090958da5e 2024-12-06T10:15:46,539 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8b11b268c80f36c7f95e9691eadcddc2/info of 8b11b268c80f36c7f95e9691eadcddc2 into 0aec1820fa6640a5b374b3090958da5e(size=102.3 K), total size for store is 102.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:15:46,539 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:15:46,539 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2., storeName=8b11b268c80f36c7f95e9691eadcddc2/info, priority=13, startTime=1733480146509; duration=0sec 2024-12-06T10:15:46,539 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:15:46,539 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8b11b268c80f36c7f95e9691eadcddc2:info 2024-12-06T10:15:46,806 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T10:15:47,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:48,121 DEBUG [master/552d6a33fa09:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-06T10:15:48,122 DEBUG [master/552d6a33fa09:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 134944dfd362da855537f8648b2c3035 changed from -1.0 to 0.0, refreshing cache 2024-12-06T10:15:48,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:48,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:48,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-06T10:15:48,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/1ce8b1fdeda04f699b46e44e80c3316e is 1080, key is row0154/info:/1733480146488/Put/seqid=0 2024-12-06T10:15:48,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741862_1038 (size=13594) 2024-12-06T10:15:48,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741862_1038 (size=13594) 2024-12-06T10:15:48,507 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/1ce8b1fdeda04f699b46e44e80c3316e 2024-12-06T10:15:48,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/1ce8b1fdeda04f699b46e44e80c3316e as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ce8b1fdeda04f699b46e44e80c3316e 2024-12-06T10:15:48,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ce8b1fdeda04f699b46e44e80c3316e, entries=8, sequenceid=206, filesize=13.3 K 2024-12-06T10:15:48,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=21.02 KB/21520 for 8b11b268c80f36c7f95e9691eadcddc2 in 23ms, sequenceid=206, compaction requested=false 2024-12-06T10:15:48,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:15:48,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:48,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): 
Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-12-06T10:15:48,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/7daaac2c847949d384cbc6ab85ad2530 is 1080, key is row0162/info:/1733480148497/Put/seqid=0 2024-12-06T10:15:48,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8b11b268c80f36c7f95e9691eadcddc2, server=552d6a33fa09,33905,1733480086872 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T10:15:48,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:36200 deadline: 1733480158528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8b11b268c80f36c7f95e9691eadcddc2, server=552d6a33fa09,33905,1733480086872 2024-12-06T10:15:48,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741863_1039 (size=28706) 2024-12-06T10:15:48,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741863_1039 (size=28706) 2024-12-06T10:15:48,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/7daaac2c847949d384cbc6ab85ad2530 2024-12-06T10:15:48,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/7daaac2c847949d384cbc6ab85ad2530 as 
hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/7daaac2c847949d384cbc6ab85ad2530 2024-12-06T10:15:48,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/7daaac2c847949d384cbc6ab85ad2530, entries=22, sequenceid=231, filesize=28.0 K 2024-12-06T10:15:48,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=7.36 KB/7532 for 8b11b268c80f36c7f95e9691eadcddc2 in 26ms, sequenceid=231, compaction requested=true 2024-12-06T10:15:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:15:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8b11b268c80f36c7f95e9691eadcddc2:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:15:48,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:15:48,546 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:15:48,547 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 147082 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:15:48,547 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1540): 8b11b268c80f36c7f95e9691eadcddc2/info is initiating minor compaction (all files) 2024-12-06T10:15:48,547 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8b11b268c80f36c7f95e9691eadcddc2/info in TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 
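The RegionTooBusyException above is the regionserver pushing back on a put while the region is over its memstore limit (32.0 K under this test's configuration); the stock HBase client treats it as retryable and backs off on its own before the flush and compaction drain the memstore. Purely to illustrate that exception type, a minimal sketch of an explicit retry-with-backoff around a single Put, assuming the table, row key and column family from the log (the qualifier, value and helper name are made up for illustration):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyRetrySketch {
    // Retry a single Put a few times, backing off when the region reports it is over its memstore limit.
    static void putWithBackoff(Table table, Put put, int maxAttempts, long initialBackoffMs)
            throws IOException, InterruptedException {
        long backoff = initialBackoffMs;
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {   // the exception type shown in the log entry above
                if (attempt >= maxAttempts) {
                    throw e;                        // give up and surface the failure to the caller
                }
                Thread.sleep(backoff);              // wait for the flush/compaction to relieve the memstore
                backoff *= 2;
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
            Put put = new Put(Bytes.toBytes("row0162"));
            // Column family "info" is from the log; the qualifier and value are placeholders.
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
            putWithBackoff(table, put, 5, 100L);
        }
    }
}

In ordinary client code the explicit catch is usually unnecessary, since this retry behavior is built into the HBase client; the sketch just makes the backoff visible.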
2024-12-06T10:15:48,547 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/0aec1820fa6640a5b374b3090958da5e, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ce8b1fdeda04f699b46e44e80c3316e, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/7daaac2c847949d384cbc6ab85ad2530] into tmpdir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp, totalSize=143.6 K 2024-12-06T10:15:48,548 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0aec1820fa6640a5b374b3090958da5e, keycount=92, bloomtype=ROW, size=102.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733480110275 2024-12-06T10:15:48,548 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ce8b1fdeda04f699b46e44e80c3316e, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733480146488 2024-12-06T10:15:48,548 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7daaac2c847949d384cbc6ab85ad2530, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733480148497 2024-12-06T10:15:48,560 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8b11b268c80f36c7f95e9691eadcddc2#info#compaction#59 average throughput is 41.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:15:48,561 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/49e34e638c79465e8f4acdbc0be52929 is 1080, key is row0062/info:/1733480110275/Put/seqid=0 2024-12-06T10:15:48,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741864_1040 (size=137360) 2024-12-06T10:15:48,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741864_1040 (size=137360) 2024-12-06T10:15:48,570 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/49e34e638c79465e8f4acdbc0be52929 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/49e34e638c79465e8f4acdbc0be52929 2024-12-06T10:15:48,576 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8b11b268c80f36c7f95e9691eadcddc2/info of 8b11b268c80f36c7f95e9691eadcddc2 into 49e34e638c79465e8f4acdbc0be52929(size=134.1 K), total size for store is 134.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:15:48,576 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:15:48,576 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2., storeName=8b11b268c80f36c7f95e9691eadcddc2/info, priority=13, startTime=1733480148546; duration=0sec 2024-12-06T10:15:48,576 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:15:48,576 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8b11b268c80f36c7f95e9691eadcddc2:info 2024-12-06T10:15:49,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:50,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:15:51,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:52,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:53,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:54,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:55,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:56,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:57,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:58,011 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 8b11b268c80f36c7f95e9691eadcddc2, had cached 0 bytes from a total of 137360 2024-12-06T10:15:58,040 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region ed4e3188ab7b31acc3d28a07be561675, had cached 0 bytes from a total of 70862 2024-12-06T10:15:58,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:15:58,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:15:58,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-06T10:15:58,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/b3950d318aab4375840ffcd065ccb2c6 is 1080, key is row0184/info:/1733480148521/Put/seqid=0 2024-12-06T10:15:58,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741865_1041 (size=13594) 2024-12-06T10:15:58,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741865_1041 (size=13594) 2024-12-06T10:15:58,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/b3950d318aab4375840ffcd065ccb2c6 2024-12-06T10:15:58,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/b3950d318aab4375840ffcd065ccb2c6 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b3950d318aab4375840ffcd065ccb2c6 2024-12-06T10:15:58,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b3950d318aab4375840ffcd065ccb2c6, entries=8, sequenceid=243, filesize=13.3 K 2024-12-06T10:15:58,554 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=1.05 KB/1076 for 8b11b268c80f36c7f95e9691eadcddc2 in 21ms, sequenceid=243, compaction requested=false 2024-12-06T10:15:58,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:15:59,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:16:00,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:16:00,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:16:00,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T10:16:00,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/bd173a34f1fc4f1c8cd88d3708c84fa9 is 1080, key is row0192/info:/1733480158534/Put/seqid=0 2024-12-06T10:16:00,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741866_1042 (size=12516) 2024-12-06T10:16:00,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741866_1042 (size=12516) 2024-12-06T10:16:00,553 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/bd173a34f1fc4f1c8cd88d3708c84fa9 2024-12-06T10:16:00,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/bd173a34f1fc4f1c8cd88d3708c84fa9 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/bd173a34f1fc4f1c8cd88d3708c84fa9 2024-12-06T10:16:00,564 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/bd173a34f1fc4f1c8cd88d3708c84fa9, entries=7, sequenceid=253, filesize=12.2 K 2024-12-06T10:16:00,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 8b11b268c80f36c7f95e9691eadcddc2 in 24ms, sequenceid=253, compaction requested=true 2024-12-06T10:16:00,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:16:00,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8b11b268c80f36c7f95e9691eadcddc2:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:16:00,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:00,565 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:16:00,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-06T10:16:00,566 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 163470 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:00,566 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1540): 8b11b268c80f36c7f95e9691eadcddc2/info is initiating minor compaction (all files) 2024-12-06T10:16:00,567 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8b11b268c80f36c7f95e9691eadcddc2/info in TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 2024-12-06T10:16:00,567 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/49e34e638c79465e8f4acdbc0be52929, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b3950d318aab4375840ffcd065ccb2c6, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/bd173a34f1fc4f1c8cd88d3708c84fa9] into tmpdir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp, totalSize=159.6 K 2024-12-06T10:16:00,567 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49e34e638c79465e8f4acdbc0be52929, keycount=122, bloomtype=ROW, size=134.1 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733480110275 2024-12-06T10:16:00,568 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3950d318aab4375840ffcd065ccb2c6, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733480148521 2024-12-06T10:16:00,568 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd173a34f1fc4f1c8cd88d3708c84fa9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733480158534 2024-12-06T10:16:00,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/86672259e04e44c4a94841d89ac0a4d3 is 1080, key is row0199/info:/1733480160542/Put/seqid=0 2024-12-06T10:16:00,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to 
blk_1073741867_1043 (size=29806) 2024-12-06T10:16:00,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741867_1043 (size=29806) 2024-12-06T10:16:00,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/86672259e04e44c4a94841d89ac0a4d3 2024-12-06T10:16:00,583 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8b11b268c80f36c7f95e9691eadcddc2#info#compaction#63 average throughput is 46.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:00,584 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/b1020efda48b4c63b4a517c19ff66340 is 1080, key is row0062/info:/1733480110275/Put/seqid=0 2024-12-06T10:16:00,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/86672259e04e44c4a94841d89ac0a4d3 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/86672259e04e44c4a94841d89ac0a4d3 2024-12-06T10:16:00,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/86672259e04e44c4a94841d89ac0a4d3, entries=23, sequenceid=279, filesize=29.1 K 2024-12-06T10:16:00,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for 8b11b268c80f36c7f95e9691eadcddc2 in 24ms, sequenceid=279, compaction requested=false 2024-12-06T10:16:00,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:16:00,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741868_1044 (size=153701) 2024-12-06T10:16:00,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741868_1044 (size=153701) 2024-12-06T10:16:00,604 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/b1020efda48b4c63b4a517c19ff66340 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b1020efda48b4c63b4a517c19ff66340 2024-12-06T10:16:00,610 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction 
of 3 (all) file(s) in 8b11b268c80f36c7f95e9691eadcddc2/info of 8b11b268c80f36c7f95e9691eadcddc2 into b1020efda48b4c63b4a517c19ff66340(size=150.1 K), total size for store is 179.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:16:00,610 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:16:00,610 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2., storeName=8b11b268c80f36c7f95e9691eadcddc2/info, priority=13, startTime=1733480160565; duration=0sec 2024-12-06T10:16:00,610 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:00,610 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8b11b268c80f36c7f95e9691eadcddc2:info 2024-12-06T10:16:01,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:16:02,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:16:02,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta after 196120ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor238.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T10:16:02,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:16:02,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T10:16:02,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/1ac3b55481954d2b96e891e0e2154c48 is 1080, key is row0222/info:/1733480160566/Put/seqid=0 2024-12-06T10:16:02,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741869_1045 (size=12523) 2024-12-06T10:16:02,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741869_1045 (size=12523) 2024-12-06T10:16:02,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/1ac3b55481954d2b96e891e0e2154c48 2024-12-06T10:16:02,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/1ac3b55481954d2b96e891e0e2154c48 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ac3b55481954d2b96e891e0e2154c48 2024-12-06T10:16:02,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ac3b55481954d2b96e891e0e2154c48, entries=7, sequenceid=290, filesize=12.2 K 2024-12-06T10:16:02,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 8b11b268c80f36c7f95e9691eadcddc2 in 24ms, sequenceid=290, compaction requested=true 2024-12-06T10:16:02,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:16:02,598 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8b11b268c80f36c7f95e9691eadcddc2:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T10:16:02,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:02,598 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T10:16:02,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33905 {}] regionserver.HRegion(8581): Flush requested on 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:16:02,599 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-06T10:16:02,600 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 196030 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T10:16:02,600 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1540): 8b11b268c80f36c7f95e9691eadcddc2/info is initiating minor compaction (all files) 2024-12-06T10:16:02,600 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8b11b268c80f36c7f95e9691eadcddc2/info in TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 2024-12-06T10:16:02,600 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b1020efda48b4c63b4a517c19ff66340, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/86672259e04e44c4a94841d89ac0a4d3, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ac3b55481954d2b96e891e0e2154c48] into tmpdir=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp, totalSize=191.4 K 2024-12-06T10:16:02,601 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1020efda48b4c63b4a517c19ff66340, keycount=137, bloomtype=ROW, size=150.1 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733480110275 2024-12-06T10:16:02,601 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86672259e04e44c4a94841d89ac0a4d3, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1733480160542 2024-12-06T10:16:02,601 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ac3b55481954d2b96e891e0e2154c48, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733480160566 2024-12-06T10:16:02,604 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/66785d67fece4739b9aaae04fb0b046b is 1080, key is row0229/info:/1733480162575/Put/seqid=0 2024-12-06T10:16:02,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741870_1046 (size=29807) 2024-12-06T10:16:02,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741870_1046 (size=29807) 2024-12-06T10:16:02,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/66785d67fece4739b9aaae04fb0b046b 2024-12-06T10:16:02,616 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8b11b268c80f36c7f95e9691eadcddc2#info#compaction#66 average throughput is 42.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T10:16:02,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/66785d67fece4739b9aaae04fb0b046b as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/66785d67fece4739b9aaae04fb0b046b 2024-12-06T10:16:02,617 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/7eb70a89057f43a19f99f7d10e5a2294 is 1080, key is row0062/info:/1733480110275/Put/seqid=0 2024-12-06T10:16:02,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/66785d67fece4739b9aaae04fb0b046b, entries=23, sequenceid=316, filesize=29.1 K 2024-12-06T10:16:02,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=5.25 KB/5380 for 8b11b268c80f36c7f95e9691eadcddc2 in 23ms, sequenceid=316, compaction requested=false 2024-12-06T10:16:02,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:16:02,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741871_1047 (size=186180) 2024-12-06T10:16:02,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741871_1047 (size=186180) 2024-12-06T10:16:02,628 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/7eb70a89057f43a19f99f7d10e5a2294 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/7eb70a89057f43a19f99f7d10e5a2294 2024-12-06T10:16:02,634 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8b11b268c80f36c7f95e9691eadcddc2/info of 8b11b268c80f36c7f95e9691eadcddc2 into 7eb70a89057f43a19f99f7d10e5a2294(size=181.8 K), total size for store is 210.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T10:16:02,634 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:16:02,634 INFO [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2., storeName=8b11b268c80f36c7f95e9691eadcddc2/info, priority=13, startTime=1733480162598; duration=0sec 2024-12-06T10:16:02,634 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T10:16:02,634 DEBUG [RS:0;552d6a33fa09:33905-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8b11b268c80f36c7f95e9691eadcddc2:info 2024-12-06T10:16:03,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:16:04,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:16:04,605 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-06T10:16:04,605 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C33905%2C1733480086872.1733480164605 2024-12-06T10:16:04,613 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872/552d6a33fa09%2C33905%2C1733480086872.1733480087254 with entries=308, filesize=306.54 KB; new WAL /user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872/552d6a33fa09%2C33905%2C1733480086872.1733480164605 2024-12-06T10:16:04,613 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32825:32825),(127.0.0.1/127.0.0.1:37659:37659)] 2024-12-06T10:16:04,613 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872/552d6a33fa09%2C33905%2C1733480086872.1733480087254 is not closed yet, will try archiving it next time 2024-12-06T10:16:04,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741833_1009 (size=313906) 2024-12-06T10:16:04,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741833_1009 (size=313906) 2024-12-06T10:16:04,617 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 8b11b268c80f36c7f95e9691eadcddc2 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-12-06T10:16:04,621 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/9da8ec8e8014471289d0c7ce0e045ccf is 1080, key is row0252/info:/1733480162600/Put/seqid=0 2024-12-06T10:16:04,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741873_1049 (size=10357) 2024-12-06T10:16:04,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741873_1049 (size=10357) 2024-12-06T10:16:04,628 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/9da8ec8e8014471289d0c7ce0e045ccf 2024-12-06T10:16:04,633 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/.tmp/info/9da8ec8e8014471289d0c7ce0e045ccf as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/9da8ec8e8014471289d0c7ce0e045ccf 2024-12-06T10:16:04,638 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/9da8ec8e8014471289d0c7ce0e045ccf, entries=5, sequenceid=325, filesize=10.1 K 2024-12-06T10:16:04,639 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 8b11b268c80f36c7f95e9691eadcddc2 in 22ms, sequenceid=325, compaction requested=true 2024-12-06T10:16:04,639 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:16:04,639 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.21 KB heapSize=4.13 KB 2024-12-06T10:16:04,644 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/.tmp/info/a9b686ec70774249ada19d90e48fd77f is 193, key is TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2./info:regioninfo/1733480113040/Put/seqid=0 2024-12-06T10:16:04,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741874_1050 (size=7803) 2024-12-06T10:16:04,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741874_1050 (size=7803) 2024-12-06T10:16:04,649 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.21 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/.tmp/info/a9b686ec70774249ada19d90e48fd77f 2024-12-06T10:16:04,654 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/.tmp/info/a9b686ec70774249ada19d90e48fd77f as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/info/a9b686ec70774249ada19d90e48fd77f 2024-12-06T10:16:04,659 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/info/a9b686ec70774249ada19d90e48fd77f, entries=16, sequenceid=24, filesize=7.6 K 2024-12-06T10:16:04,660 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~2.21 KB/2260, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=24, compaction requested=false 2024-12-06T10:16:04,660 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-06T10:16:04,660 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 134944dfd362da855537f8648b2c3035 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T10:16:04,682 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/namespace/134944dfd362da855537f8648b2c3035/.tmp/info/9993c8a5b3164cbbb5303dae4861cfd9 is 45, key is default/info:d/1733480088047/Put/seqid=0 2024-12-06T10:16:04,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 
is added to blk_1073741875_1051 (size=5037) 2024-12-06T10:16:04,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741875_1051 (size=5037) 2024-12-06T10:16:04,687 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/namespace/134944dfd362da855537f8648b2c3035/.tmp/info/9993c8a5b3164cbbb5303dae4861cfd9 2024-12-06T10:16:04,692 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/namespace/134944dfd362da855537f8648b2c3035/.tmp/info/9993c8a5b3164cbbb5303dae4861cfd9 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/namespace/134944dfd362da855537f8648b2c3035/info/9993c8a5b3164cbbb5303dae4861cfd9 2024-12-06T10:16:04,697 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/namespace/134944dfd362da855537f8648b2c3035/info/9993c8a5b3164cbbb5303dae4861cfd9, entries=2, sequenceid=6, filesize=4.9 K 2024-12-06T10:16:04,698 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 134944dfd362da855537f8648b2c3035 in 38ms, sequenceid=6, compaction requested=false 2024-12-06T10:16:04,698 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 134944dfd362da855537f8648b2c3035: 2024-12-06T10:16:04,698 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for ed4e3188ab7b31acc3d28a07be561675: 2024-12-06T10:16:04,698 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C33905%2C1733480086872.1733480164698 2024-12-06T10:16:04,706 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872/552d6a33fa09%2C33905%2C1733480086872.1733480164605 with entries=4, filesize=1.22 KB; new WAL /user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872/552d6a33fa09%2C33905%2C1733480086872.1733480164698 2024-12-06T10:16:04,707 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32825:32825),(127.0.0.1/127.0.0.1:37659:37659)] 2024-12-06T10:16:04,707 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872/552d6a33fa09%2C33905%2C1733480086872.1733480164605 is not closed yet, will try archiving it next time 2024-12-06T10:16:04,707 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872/552d6a33fa09%2C33905%2C1733480086872.1733480087254 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/oldWALs/552d6a33fa09%2C33905%2C1733480086872.1733480087254 2024-12-06T10:16:04,708 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T10:16:04,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741872_1048 (size=1255) 2024-12-06T10:16:04,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741872_1048 (size=1255) 2024-12-06T10:16:04,709 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872/552d6a33fa09%2C33905%2C1733480086872.1733480164605 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/oldWALs/552d6a33fa09%2C33905%2C1733480086872.1733480164605 2024-12-06T10:16:04,808 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T10:16:04,808 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T10:16:04,808 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31a733e5 to 127.0.0.1:60928 2024-12-06T10:16:04,808 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:16:04,808 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T10:16:04,808 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1093822040, stopped=false 2024-12-06T10:16:04,808 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=552d6a33fa09,34115,1733480086823 2024-12-06T10:16:04,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:16:04,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:16:04,810 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T10:16:04,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:04,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:04,810 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:16:04,810 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,33905,1733480086872' ***** 2024-12-06T10:16:04,810 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T10:16:04,810 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T10:16:04,810 INFO [RS:0;552d6a33fa09:33905 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T10:16:04,810 INFO [RS:0;552d6a33fa09:33905 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
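The RecoverLeaseFSUtils warnings that recur above ("Failed invocation ... Caused by: java.io.IOException: Filesystem closed") all target the old meta WAL on a different NameNode port (45977, while the active test data lives on 45939), so they appear to be retries against a DFSClient that has already been shut down and therefore can never succeed; the utility keeps invoking DistributedFileSystem.recoverLease and isFileClosed (reflectively, hence the InvocationTargetException wrapper) roughly once per second, as the "attempt=3 ... after 196120ms" entry shows. Below is a minimal sketch of that retry shape, with assumed pause/timeout parameters and direct rather than reflective calls; it is an illustration, not the RecoverLeaseFSUtils implementation.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      // Keep asking the NameNode to recover the WAL's lease until the file is closed.
      static void recoverLease(DistributedFileSystem dfs, Path wal,
                               long pauseMs, long timeoutMs) throws IOException {
        long start = System.currentTimeMillis();
        int attempt = 0;
        while (true) {
          try {
            // Both calls appear in the stack traces above; once the DFSClient is
            // shut down they throw IOException("Filesystem closed"), which is why
            // the retries in this log never make progress.
            if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
              return; // lease recovered, the old writer can be closed safely
            }
          } catch (IOException e) {
            System.err.println("attempt=" + attempt + " failed: " + e.getMessage());
          }
          if (System.currentTimeMillis() - start > timeoutMs) {
            throw new IOException("gave up recovering lease on " + wal);
          }
          attempt++;
          try {
            Thread.sleep(pauseMs); // the log shows roughly one retry per second
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            throw new IOException("interrupted while recovering lease on " + wal, ie);
          }
        }
      }
    }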
2024-12-06T10:16:04,810 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T10:16:04,810 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(3579): Received CLOSE for 8b11b268c80f36c7f95e9691eadcddc2 2024-12-06T10:16:04,811 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(3579): Received CLOSE for 134944dfd362da855537f8648b2c3035 2024-12-06T10:16:04,811 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(3579): Received CLOSE for ed4e3188ab7b31acc3d28a07be561675 2024-12-06T10:16:04,811 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,33905,1733480086872 2024-12-06T10:16:04,811 DEBUG [RS:0;552d6a33fa09:33905 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:16:04,811 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T10:16:04,811 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T10:16:04,811 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T10:16:04,811 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 8b11b268c80f36c7f95e9691eadcddc2, disabling compactions & flushes 2024-12-06T10:16:04,811 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 2024-12-06T10:16:04,811 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T10:16:04,811 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:16:04,811 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 2024-12-06T10:16:04,811 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. after waiting 0 ms 2024-12-06T10:16:04,811 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 
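Most of the log above is one cycle repeating for region 8b11b268c80f36c7f95e9691eadcddc2: a flush serializes the memstore into a file under .tmp, the file is committed into the info store (the "Committing .tmp/... as info/..." lines), and once three store files are eligible the ExploringCompactionPolicy selects them for a minor compaction that merges them into a single larger file. The class below is a deliberately simplified, hypothetical sketch of that shape using plain java.nio; it is not HBase code and its names are invented for illustration.

    import java.io.IOException;
    import java.nio.file.*;
    import java.util.*;

    public final class FlushCompactSketch {
      private final Path storeDir;   // stands in for .../<region>/info
      private final Path tmpDir;     // stands in for .../<region>/.tmp
      private static final int COMPACTION_THRESHOLD = 3; // assumption; mirrors the "3 eligible" lines above

      FlushCompactSketch(Path regionDir) {
        this.storeDir = regionDir.resolve("info");
        this.tmpDir = regionDir.resolve(".tmp");
      }

      // Flush: write the memstore snapshot to a temp file, then commit it by rename.
      void flush(byte[] memstoreSnapshot, String fileName) throws IOException {
        Files.createDirectories(tmpDir);
        Files.createDirectories(storeDir);
        Path tmp = tmpDir.resolve(fileName);
        Files.write(tmp, memstoreSnapshot);                  // "Flushed memstore ... to=.tmp/..."
        Files.move(tmp, storeDir.resolve(fileName),
            StandardCopyOption.ATOMIC_MOVE);                 // "Committing .tmp/... as info/..."
        if (storeFileCount() >= COMPACTION_THRESHOLD) {
          compactAll();                                      // "compaction requested=true"
        }
      }

      private long storeFileCount() throws IOException {
        try (var files = Files.list(storeDir)) { return files.count(); }
      }

      // Compaction: concatenate every store file into one new file, then drop the inputs.
      private void compactAll() throws IOException {
        List<Path> inputs;
        try (var files = Files.list(storeDir)) { inputs = new ArrayList<>(files.toList()); }
        Path merged = tmpDir.resolve("compacted-" + System.nanoTime());
        try (var out = Files.newOutputStream(merged)) {
          for (Path in : inputs) { out.write(Files.readAllBytes(in)); }
        }
        Files.move(merged, storeDir.resolve(merged.getFileName()), StandardCopyOption.ATOMIC_MOVE);
        // HBase archives the inputs instead of deleting them; see the archival sketch below.
        for (Path in : inputs) { Files.delete(in); }
      }
    }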
2024-12-06T10:16:04,811 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:16:04,811 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1599): Waiting on 4 regions to close 2024-12-06T10:16:04,811 DEBUG [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1603): Online Regions={8b11b268c80f36c7f95e9691eadcddc2=TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2., 1588230740=hbase:meta,,1.1588230740, 134944dfd362da855537f8648b2c3035=hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035., ed4e3188ab7b31acc3d28a07be561675=TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675.} 2024-12-06T10:16:04,812 DEBUG [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1629): Waiting on 134944dfd362da855537f8648b2c3035, 1588230740, 8b11b268c80f36c7f95e9691eadcddc2, ed4e3188ab7b31acc3d28a07be561675 2024-12-06T10:16:04,812 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:16:04,812 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:16:04,812 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:16:04,812 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:16:04,812 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:16:04,812 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990->hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/1b1a1952f65143268ab7945362a46a0b-top, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-fc5a4b47fe2a4ed6a4a5a7f5ff9584e5, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/10ab0d1e18d64fdfb1ea28138c502ee8, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-e0ceb309a88b4f958968a2a4eb849c94, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/2a66d7bb7f5c4928b5b4baa45fc56c75, 
hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/2834823512fe4b7c92de836f2b908924, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/d534f237195c4fd3a0b3931ce375a07e, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b61b2e9854654d8eb4d13e542186f54c, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/0aec1820fa6640a5b374b3090958da5e, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/0c93c624496a404b8ce8c84eac3a3e76, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ce8b1fdeda04f699b46e44e80c3316e, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/49e34e638c79465e8f4acdbc0be52929, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/7daaac2c847949d384cbc6ab85ad2530, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b3950d318aab4375840ffcd065ccb2c6, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b1020efda48b4c63b4a517c19ff66340, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/bd173a34f1fc4f1c8cd88d3708c84fa9, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/86672259e04e44c4a94841d89ac0a4d3, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ac3b55481954d2b96e891e0e2154c48] to archive 2024-12-06T10:16:04,813 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
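The per-file HFileArchiver entries that follow do not delete the superseded store files: each one is moved from the region's location under data/ to the mirrored location under archive/ (the source and destination paths differ only by that archive/ prefix). Below is a minimal, hypothetical sketch of that path mapping, assuming a Hadoop FileSystem handle and the rootDir layout visible in the paths above; it is not the HFileArchiver implementation.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ArchiveSketch {
      // Move one store file from <root>/data/<ns>/<table>/<region>/<cf>/<file>
      // to the same relative path under <root>/archive/data/... .
      static void archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        Path dataDir = new Path(rootDir, "data");
        // Assumes storeFile really lives under dataDir, as in the paths logged above.
        String relative = storeFile.toUri().getPath()
            .substring(dataDir.toUri().getPath().length() + 1);
        Path target = new Path(new Path(rootDir, "archive/data"), relative);
        fs.mkdirs(target.getParent());
        if (!fs.rename(storeFile, target)) {
          throw new IOException("could not archive " + storeFile + " to " + target);
        }
      }
    }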
2024-12-06T10:16:04,815 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:16:04,816 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/meta/1588230740/recovered.edits/27.seqid, newMaxSeqId=27, maxSeqId=1 2024-12-06T10:16:04,816 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-fc5a4b47fe2a4ed6a4a5a7f5ff9584e5 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-fc5a4b47fe2a4ed6a4a5a7f5ff9584e5 2024-12-06T10:16:04,817 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T10:16:04,817 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T10:16:04,817 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:16:04,817 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T10:16:04,818 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/10ab0d1e18d64fdfb1ea28138c502ee8 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/10ab0d1e18d64fdfb1ea28138c502ee8 2024-12-06T10:16:04,819 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-e0ceb309a88b4f958968a2a4eb849c94 to 
hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/TestLogRolling-testLogRolling=9f98efa95d36f7a34c151ce0056e8990-e0ceb309a88b4f958968a2a4eb849c94 2024-12-06T10:16:04,820 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/2a66d7bb7f5c4928b5b4baa45fc56c75 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/2a66d7bb7f5c4928b5b4baa45fc56c75 2024-12-06T10:16:04,821 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/2834823512fe4b7c92de836f2b908924 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/2834823512fe4b7c92de836f2b908924 2024-12-06T10:16:04,822 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/d534f237195c4fd3a0b3931ce375a07e to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/d534f237195c4fd3a0b3931ce375a07e 2024-12-06T10:16:04,823 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b61b2e9854654d8eb4d13e542186f54c to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b61b2e9854654d8eb4d13e542186f54c 2024-12-06T10:16:04,824 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/0aec1820fa6640a5b374b3090958da5e to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/0aec1820fa6640a5b374b3090958da5e 2024-12-06T10:16:04,825 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/0c93c624496a404b8ce8c84eac3a3e76 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/0c93c624496a404b8ce8c84eac3a3e76 2024-12-06T10:16:04,826 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ce8b1fdeda04f699b46e44e80c3316e to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ce8b1fdeda04f699b46e44e80c3316e 2024-12-06T10:16:04,827 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/49e34e638c79465e8f4acdbc0be52929 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/49e34e638c79465e8f4acdbc0be52929 2024-12-06T10:16:04,828 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/7daaac2c847949d384cbc6ab85ad2530 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/7daaac2c847949d384cbc6ab85ad2530 2024-12-06T10:16:04,829 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b3950d318aab4375840ffcd065ccb2c6 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b3950d318aab4375840ffcd065ccb2c6 2024-12-06T10:16:04,830 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b1020efda48b4c63b4a517c19ff66340 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/b1020efda48b4c63b4a517c19ff66340 2024-12-06T10:16:04,831 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/bd173a34f1fc4f1c8cd88d3708c84fa9 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/bd173a34f1fc4f1c8cd88d3708c84fa9 2024-12-06T10:16:04,832 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/86672259e04e44c4a94841d89ac0a4d3 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/86672259e04e44c4a94841d89ac0a4d3 2024-12-06T10:16:04,833 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ac3b55481954d2b96e891e0e2154c48 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/info/1ac3b55481954d2b96e891e0e2154c48 2024-12-06T10:16:04,837 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/8b11b268c80f36c7f95e9691eadcddc2/recovered.edits/328.seqid, newMaxSeqId=328, maxSeqId=121 2024-12-06T10:16:04,838 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 2024-12-06T10:16:04,838 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 8b11b268c80f36c7f95e9691eadcddc2: 2024-12-06T10:16:04,838 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733480112349.8b11b268c80f36c7f95e9691eadcddc2. 2024-12-06T10:16:04,838 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 134944dfd362da855537f8648b2c3035, disabling compactions & flushes 2024-12-06T10:16:04,838 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 2024-12-06T10:16:04,838 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 
2024-12-06T10:16:04,838 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. after waiting 0 ms 2024-12-06T10:16:04,838 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 2024-12-06T10:16:04,842 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/hbase/namespace/134944dfd362da855537f8648b2c3035/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T10:16:04,842 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 2024-12-06T10:16:04,842 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 134944dfd362da855537f8648b2c3035: 2024-12-06T10:16:04,842 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733480087635.134944dfd362da855537f8648b2c3035. 2024-12-06T10:16:04,842 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing ed4e3188ab7b31acc3d28a07be561675, disabling compactions & flushes 2024-12-06T10:16:04,842 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675. 2024-12-06T10:16:04,842 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675. 2024-12-06T10:16:04,842 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675. after waiting 0 ms 2024-12-06T10:16:04,842 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675. 2024-12-06T10:16:04,843 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990->hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/9f98efa95d36f7a34c151ce0056e8990/info/1b1a1952f65143268ab7945362a46a0b-bottom] to archive 2024-12-06T10:16:04,843 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T10:16:04,844 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990 to hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/archive/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/info/1b1a1952f65143268ab7945362a46a0b.9f98efa95d36f7a34c151ce0056e8990 2024-12-06T10:16:04,848 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/data/default/TestLogRolling-testLogRolling/ed4e3188ab7b31acc3d28a07be561675/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=121 2024-12-06T10:16:04,848 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675. 2024-12-06T10:16:04,848 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for ed4e3188ab7b31acc3d28a07be561675: 2024-12-06T10:16:04,849 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733480112349.ed4e3188ab7b31acc3d28a07be561675. 2024-12-06T10:16:05,012 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,33905,1733480086872; all regions closed. 
2024-12-06T10:16:05,012 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872 2024-12-06T10:16:05,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741834_1010 (size=9351) 2024-12-06T10:16:05,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741834_1010 (size=9351) 2024-12-06T10:16:05,017 DEBUG [RS:0;552d6a33fa09:33905 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/oldWALs 2024-12-06T10:16:05,017 INFO [RS:0;552d6a33fa09:33905 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 552d6a33fa09%2C33905%2C1733480086872.meta:.meta(num 1733480087598) 2024-12-06T10:16:05,017 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/WALs/552d6a33fa09,33905,1733480086872 2024-12-06T10:16:05,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741876_1052 (size=1071) 2024-12-06T10:16:05,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741876_1052 (size=1071) 2024-12-06T10:16:05,021 DEBUG [RS:0;552d6a33fa09:33905 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/oldWALs 2024-12-06T10:16:05,021 INFO [RS:0;552d6a33fa09:33905 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 552d6a33fa09%2C33905%2C1733480086872:(num 1733480164698) 2024-12-06T10:16:05,021 DEBUG [RS:0;552d6a33fa09:33905 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:16:05,021 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:16:05,022 INFO [RS:0;552d6a33fa09:33905 {}] hbase.ChoreService(370): Chore service for: regionserver/552d6a33fa09:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T10:16:05,022 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T10:16:05,022 INFO [RS:0;552d6a33fa09:33905 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33905 2024-12-06T10:16:05,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/552d6a33fa09,33905,1733480086872 2024-12-06T10:16:05,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:16:05,027 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [552d6a33fa09,33905,1733480086872] 2024-12-06T10:16:05,027 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 552d6a33fa09,33905,1733480086872; numProcessing=1 2024-12-06T10:16:05,028 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/552d6a33fa09,33905,1733480086872 already deleted, retry=false 2024-12-06T10:16:05,028 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 552d6a33fa09,33905,1733480086872 expired; onlineServers=0 2024-12-06T10:16:05,028 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,34115,1733480086823' ***** 2024-12-06T10:16:05,028 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T10:16:05,028 DEBUG [M:0;552d6a33fa09:34115 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@438df872, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:16:05,028 INFO [M:0;552d6a33fa09:34115 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,34115,1733480086823 2024-12-06T10:16:05,029 INFO [M:0;552d6a33fa09:34115 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,34115,1733480086823; all regions closed. 2024-12-06T10:16:05,029 DEBUG [M:0;552d6a33fa09:34115 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:16:05,029 DEBUG [M:0;552d6a33fa09:34115 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T10:16:05,029 DEBUG [M:0;552d6a33fa09:34115 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T10:16:05,029 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T10:16:05,029 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480086999 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480086999,5,FailOnTimeoutGroup] 2024-12-06T10:16:05,029 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480086999 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480086999,5,FailOnTimeoutGroup] 2024-12-06T10:16:05,029 INFO [M:0;552d6a33fa09:34115 {}] hbase.ChoreService(370): Chore service for: master/552d6a33fa09:0 had [] on shutdown 2024-12-06T10:16:05,029 DEBUG [M:0;552d6a33fa09:34115 {}] master.HMaster(1733): Stopping service threads 2024-12-06T10:16:05,029 INFO [M:0;552d6a33fa09:34115 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T10:16:05,029 INFO [M:0;552d6a33fa09:34115 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T10:16:05,029 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T10:16:05,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T10:16:05,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:05,030 DEBUG [M:0;552d6a33fa09:34115 {}] zookeeper.ZKUtil(347): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T10:16:05,030 WARN [M:0;552d6a33fa09:34115 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T10:16:05,030 INFO [M:0;552d6a33fa09:34115 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T10:16:05,030 INFO [M:0;552d6a33fa09:34115 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T10:16:05,030 DEBUG [M:0;552d6a33fa09:34115 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:16:05,030 INFO [M:0;552d6a33fa09:34115 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:05,030 DEBUG [M:0;552d6a33fa09:34115 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:05,030 DEBUG [M:0;552d6a33fa09:34115 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:16:05,030 DEBUG [M:0;552d6a33fa09:34115 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T10:16:05,030 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:16:05,030 INFO [M:0;552d6a33fa09:34115 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=66.44 KB heapSize=81.69 KB 2024-12-06T10:16:05,046 DEBUG [M:0;552d6a33fa09:34115 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c64692e5ad145ec965410a474172666 is 82, key is hbase:meta,,1/info:regioninfo/1733480087616/Put/seqid=0 2024-12-06T10:16:05,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741877_1053 (size=5672) 2024-12-06T10:16:05,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741877_1053 (size=5672) 2024-12-06T10:16:05,052 INFO [M:0;552d6a33fa09:34115 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c64692e5ad145ec965410a474172666 2024-12-06T10:16:05,071 DEBUG [M:0;552d6a33fa09:34115 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/656e4ff94fc049d581ecd0a9fb7dad89 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733480088466/Put/seqid=0 2024-12-06T10:16:05,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741878_1054 (size=7285) 2024-12-06T10:16:05,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741878_1054 (size=7285) 2024-12-06T10:16:05,076 INFO [M:0;552d6a33fa09:34115 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.83 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/656e4ff94fc049d581ecd0a9fb7dad89 2024-12-06T10:16:05,080 INFO [M:0;552d6a33fa09:34115 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 656e4ff94fc049d581ecd0a9fb7dad89 2024-12-06T10:16:05,094 DEBUG [M:0;552d6a33fa09:34115 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2151f7ec38d649d7aa93e5f6d041072c is 69, key is 552d6a33fa09,33905,1733480086872/rs:state/1733480087107/Put/seqid=0 2024-12-06T10:16:05,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741879_1055 (size=5156) 2024-12-06T10:16:05,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741879_1055 (size=5156) 2024-12-06T10:16:05,099 INFO [M:0;552d6a33fa09:34115 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2151f7ec38d649d7aa93e5f6d041072c 2024-12-06T10:16:05,118 DEBUG [M:0;552d6a33fa09:34115 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ed0d00252cba4e5e8baac818810f824d is 52, key is load_balancer_on/state:d/1733480088099/Put/seqid=0 2024-12-06T10:16:05,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741880_1056 (size=5056) 2024-12-06T10:16:05,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741880_1056 (size=5056) 2024-12-06T10:16:05,123 INFO [M:0;552d6a33fa09:34115 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ed0d00252cba4e5e8baac818810f824d 2024-12-06T10:16:05,123 INFO [regionserver/552d6a33fa09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:16:05,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:16:05,127 INFO [RS:0;552d6a33fa09:33905 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,33905,1733480086872; zookeeper connection closed. 
2024-12-06T10:16:05,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10066d2b5fe0001, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:16:05,127 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e45dd6b {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e45dd6b 2024-12-06T10:16:05,128 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T10:16:05,129 DEBUG [M:0;552d6a33fa09:34115 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c64692e5ad145ec965410a474172666 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c64692e5ad145ec965410a474172666 2024-12-06T10:16:05,133 INFO [M:0;552d6a33fa09:34115 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c64692e5ad145ec965410a474172666, entries=8, sequenceid=164, filesize=5.5 K 2024-12-06T10:16:05,133 DEBUG [M:0;552d6a33fa09:34115 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/656e4ff94fc049d581ecd0a9fb7dad89 as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/656e4ff94fc049d581ecd0a9fb7dad89 2024-12-06T10:16:05,137 INFO [M:0;552d6a33fa09:34115 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 656e4ff94fc049d581ecd0a9fb7dad89 2024-12-06T10:16:05,137 INFO [M:0;552d6a33fa09:34115 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/656e4ff94fc049d581ecd0a9fb7dad89, entries=18, sequenceid=164, filesize=7.1 K 2024-12-06T10:16:05,138 DEBUG [M:0;552d6a33fa09:34115 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2151f7ec38d649d7aa93e5f6d041072c as hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2151f7ec38d649d7aa93e5f6d041072c 2024-12-06T10:16:05,142 INFO [M:0;552d6a33fa09:34115 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2151f7ec38d649d7aa93e5f6d041072c, entries=1, sequenceid=164, filesize=5.0 K 2024-12-06T10:16:05,142 DEBUG [M:0;552d6a33fa09:34115 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ed0d00252cba4e5e8baac818810f824d as 
hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ed0d00252cba4e5e8baac818810f824d 2024-12-06T10:16:05,146 INFO [M:0;552d6a33fa09:34115 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45939/user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ed0d00252cba4e5e8baac818810f824d, entries=1, sequenceid=164, filesize=4.9 K 2024-12-06T10:16:05,147 INFO [M:0;552d6a33fa09:34115 {}] regionserver.HRegion(3040): Finished flush of dataSize ~66.44 KB/68031, heapSize ~81.63 KB/83584, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=164, compaction requested=false 2024-12-06T10:16:05,149 INFO [M:0;552d6a33fa09:34115 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:05,149 DEBUG [M:0;552d6a33fa09:34115 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:16:05,149 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/25520228-7ee5-bfde-1f13-20c3c50c78c9/MasterData/WALs/552d6a33fa09,34115,1733480086823 2024-12-06T10:16:05,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741830_1006 (size=79260) 2024-12-06T10:16:05,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741830_1006 (size=79260) 2024-12-06T10:16:05,151 INFO [M:0;552d6a33fa09:34115 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-06T10:16:05,151 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-06T10:16:05,151 INFO [M:0;552d6a33fa09:34115 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34115 2024-12-06T10:16:05,153 DEBUG [M:0;552d6a33fa09:34115 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/552d6a33fa09,34115,1733480086823 already deleted, retry=false 2024-12-06T10:16:05,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-06T10:16:05,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-06T10:16:05,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:16:05,255 INFO [M:0;552d6a33fa09:34115 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,34115,1733480086823; zookeeper connection closed. 
2024-12-06T10:16:05,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34115-0x10066d2b5fe0000, quorum=127.0.0.1:60928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:16:05,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59d12a70{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:16:05,258 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@57c0ad86{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:16:05,258 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:16:05,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@52ad931f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:16:05,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15bff48f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/hadoop.log.dir/,STOPPED} 2024-12-06T10:16:05,259 WARN [BP-1503095463-172.17.0.2-1733480086169 heartbeating to localhost/127.0.0.1:45939 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:16:05,259 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:16:05,259 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:16:05,259 WARN [BP-1503095463-172.17.0.2-1733480086169 heartbeating to localhost/127.0.0.1:45939 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1503095463-172.17.0.2-1733480086169 (Datanode Uuid 09e2e145-e660-4237-b5eb-a2ef5392eb25) service to localhost/127.0.0.1:45939 2024-12-06T10:16:05,260 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93/dfs/data/data3/current/BP-1503095463-172.17.0.2-1733480086169 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:16:05,260 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93/dfs/data/data4/current/BP-1503095463-172.17.0.2-1733480086169 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:16:05,260 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:16:05,262 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60d32c96{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:16:05,263 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@407f2320{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:16:05,263 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:16:05,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4aeec8df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:16:05,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44bd9a2a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/hadoop.log.dir/,STOPPED} 2024-12-06T10:16:05,264 WARN [BP-1503095463-172.17.0.2-1733480086169 heartbeating to localhost/127.0.0.1:45939 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:16:05,264 WARN [BP-1503095463-172.17.0.2-1733480086169 heartbeating to localhost/127.0.0.1:45939 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1503095463-172.17.0.2-1733480086169 (Datanode Uuid c7cc0e52-ac7a-4413-8cf7-4bd2f7990889) service to localhost/127.0.0.1:45939 2024-12-06T10:16:05,264 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:16:05,264 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:16:05,265 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93/dfs/data/data1/current/BP-1503095463-172.17.0.2-1733480086169 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:16:05,265 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/cluster_f47b1158-56be-0951-f760-b117b6f7ac93/dfs/data/data2/current/BP-1503095463-172.17.0.2-1733480086169 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:16:05,265 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:16:05,271 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@491e4eb5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:16:05,272 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e57ffb4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:16:05,272 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:16:05,272 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1849e93e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:16:05,272 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a33d33a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/hadoop.log.dir/,STOPPED} 2024-12-06T10:16:05,280 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T10:16:05,307 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-06T10:16:05,315 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=125 (was 111) - Thread LEAK? -, OpenFileDescriptor=496 (was 464) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=114 (was 126), ProcessCount=11 (was 11), AvailableMemoryMB=6456 (was 6523) 2024-12-06T10:16:05,323 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=125, OpenFileDescriptor=496, MaxFileDescriptor=1048576, SystemLoadAverage=114, ProcessCount=11, AvailableMemoryMB=6456 2024-12-06T10:16:05,323 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T10:16:05,323 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/hadoop.log.dir so I do NOT create it in target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa 2024-12-06T10:16:05,323 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b5e2c26b-4fc7-8a2a-da37-f39b0c13037f/hadoop.tmp.dir so I do NOT create it in target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa 2024-12-06T10:16:05,323 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698, deleteOnExit=true 2024-12-06T10:16:05,323 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T10:16:05,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/test.cache.data in system properties and HBase conf 2024-12-06T10:16:05,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T10:16:05,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/hadoop.log.dir in system properties and HBase conf 2024-12-06T10:16:05,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T10:16:05,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T10:16:05,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T10:16:05,324 DEBUG [Time-limited test {}] 
fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-06T10:16:05,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:16:05,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T10:16:05,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T10:16:05,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:16:05,324 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T10:16:05,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T10:16:05,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T10:16:05,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:16:05,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T10:16:05,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/nfs.dump.dir in system properties and HBase conf 2024-12-06T10:16:05,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/java.io.tmpdir in system properties and HBase conf 2024-12-06T10:16:05,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T10:16:05,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T10:16:05,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T10:16:05,338 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:16:05,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T10:16:05,438 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:16:05,444 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:16:05,445 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:16:05,445 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:16:05,445 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T10:16:05,446 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:16:05,446 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16cb9ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:16:05,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@571fe688{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:16:05,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25079c3d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/java.io.tmpdir/jetty-localhost-38117-hadoop-hdfs-3_4_1-tests_jar-_-any-969356845885346094/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:16:05,592 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64cf7e05{HTTP/1.1, (http/1.1)}{localhost:38117} 2024-12-06T10:16:05,592 INFO [Time-limited test {}] server.Server(415): Started @366883ms 2024-12-06T10:16:05,604 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T10:16:05,666 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:16:05,669 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:16:05,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:16:05,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:16:05,670 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:16:05,670 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77481e88{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:16:05,671 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3cc77b22{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:16:05,785 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@66e0ac38{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/java.io.tmpdir/jetty-localhost-38903-hadoop-hdfs-3_4_1-tests_jar-_-any-1597119661600729916/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:16:05,785 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@67c014bf{HTTP/1.1, (http/1.1)}{localhost:38903} 2024-12-06T10:16:05,785 INFO [Time-limited test {}] server.Server(415): Started @367077ms 2024-12-06T10:16:05,786 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T10:16:05,816 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T10:16:05,819 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T10:16:05,820 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T10:16:05,820 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T10:16:05,820 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T10:16:05,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49726ed6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/hadoop.log.dir/,AVAILABLE} 2024-12-06T10:16:05,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12527899{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T10:16:05,870 WARN [Thread-2188 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698/dfs/data/data2/current/BP-1836740515-172.17.0.2-1733480165363/current, will proceed with Du for space computation calculation, 2024-12-06T10:16:05,870 WARN [Thread-2187 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698/dfs/data/data1/current/BP-1836740515-172.17.0.2-1733480165363/current, will proceed with Du for space computation calculation, 2024-12-06T10:16:05,893 WARN [Thread-2166 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T10:16:05,895 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc0b7be28da95b576 with lease ID 0xcc4353e80929d5d1: Processing first storage report for DS-02d0d04a-9256-4be4-8059-394bd94b2732 from datanode DatanodeRegistration(127.0.0.1:35931, datanodeUuid=34353ccf-ee75-4930-8db0-f3dd99224883, infoPort=35129, infoSecurePort=0, ipcPort=39341, storageInfo=lv=-57;cid=testClusterID;nsid=1343471445;c=1733480165363) 2024-12-06T10:16:05,895 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc0b7be28da95b576 with lease ID 0xcc4353e80929d5d1: from storage DS-02d0d04a-9256-4be4-8059-394bd94b2732 node DatanodeRegistration(127.0.0.1:35931, datanodeUuid=34353ccf-ee75-4930-8db0-f3dd99224883, infoPort=35129, infoSecurePort=0, ipcPort=39341, storageInfo=lv=-57;cid=testClusterID;nsid=1343471445;c=1733480165363), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:16:05,896 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc0b7be28da95b576 with lease ID 0xcc4353e80929d5d1: Processing first storage report for DS-b3505c58-d3fe-42db-9591-1713519ec8d1 from datanode DatanodeRegistration(127.0.0.1:35931, datanodeUuid=34353ccf-ee75-4930-8db0-f3dd99224883, infoPort=35129, infoSecurePort=0, ipcPort=39341, storageInfo=lv=-57;cid=testClusterID;nsid=1343471445;c=1733480165363) 2024-12-06T10:16:05,896 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc0b7be28da95b576 with lease ID 0xcc4353e80929d5d1: from storage DS-b3505c58-d3fe-42db-9591-1713519ec8d1 node DatanodeRegistration(127.0.0.1:35931, datanodeUuid=34353ccf-ee75-4930-8db0-f3dd99224883, infoPort=35129, infoSecurePort=0, ipcPort=39341, storageInfo=lv=-57;cid=testClusterID;nsid=1343471445;c=1733480165363), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:16:05,938 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d43e104{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/java.io.tmpdir/jetty-localhost-34115-hadoop-hdfs-3_4_1-tests_jar-_-any-14467816115470871289/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:16:05,938 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@660987f3{HTTP/1.1, (http/1.1)}{localhost:34115} 2024-12-06T10:16:05,938 INFO [Time-limited test {}] server.Server(415): Started @367230ms 2024-12-06T10:16:05,939 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
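The entries above show HBaseTestingUtility redirecting the dfs.*, yarn.* and nfs.* directories into the per-run test-data folder and then bringing up the embedded NameNode and DataNode HTTP servers of the mini DFS cluster. As a rough illustration of the kind of test driver that produces output like this (this is not code from this test run; the table and family names below are invented), a minimal sketch against the HBase 2.x test utility could look like:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // HBaseTestingUtility owns the temporary test-data directory and rewrites
    // the dfs.*/yarn.* paths into system properties and the HBase conf, as logged above.
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();            // mini DFS + mini ZooKeeper + one master/regionserver
    try {
      TableName name = TableName.valueOf("sketch");             // illustrative name
      Table table = util.createTable(name, Bytes.toBytes("info"));
      table.put(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      table.close();
    } finally {
      util.shutdownMiniCluster();       // tears the whole mini cluster back down
    }
  }
}

Shutting the utility down at the end is what produces the corresponding teardown entries near the end of a run like this.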
2024-12-06T10:16:06,017 WARN [Thread-2214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698/dfs/data/data4/current/BP-1836740515-172.17.0.2-1733480165363/current, will proceed with Du for space computation calculation, 2024-12-06T10:16:06,017 WARN [Thread-2213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698/dfs/data/data3/current/BP-1836740515-172.17.0.2-1733480165363/current, will proceed with Du for space computation calculation, 2024-12-06T10:16:06,034 WARN [Thread-2202 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T10:16:06,036 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c3449d9291c6851 with lease ID 0xcc4353e80929d5d2: Processing first storage report for DS-ee832e88-5c55-4ffb-9f0c-38d3dacc327e from datanode DatanodeRegistration(127.0.0.1:40431, datanodeUuid=efced11e-bb85-45f7-82fa-86ebe1e567de, infoPort=35351, infoSecurePort=0, ipcPort=38425, storageInfo=lv=-57;cid=testClusterID;nsid=1343471445;c=1733480165363) 2024-12-06T10:16:06,036 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c3449d9291c6851 with lease ID 0xcc4353e80929d5d2: from storage DS-ee832e88-5c55-4ffb-9f0c-38d3dacc327e node DatanodeRegistration(127.0.0.1:40431, datanodeUuid=efced11e-bb85-45f7-82fa-86ebe1e567de, infoPort=35351, infoSecurePort=0, ipcPort=38425, storageInfo=lv=-57;cid=testClusterID;nsid=1343471445;c=1733480165363), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T10:16:06,036 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c3449d9291c6851 with lease ID 0xcc4353e80929d5d2: Processing first storage report for DS-d5350803-70e7-469a-9d00-6e385577b8b9 from datanode DatanodeRegistration(127.0.0.1:40431, datanodeUuid=efced11e-bb85-45f7-82fa-86ebe1e567de, infoPort=35351, infoSecurePort=0, ipcPort=38425, storageInfo=lv=-57;cid=testClusterID;nsid=1343471445;c=1733480165363) 2024-12-06T10:16:06,036 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c3449d9291c6851 with lease ID 0xcc4353e80929d5d2: from storage DS-d5350803-70e7-469a-9d00-6e385577b8b9 node DatanodeRegistration(127.0.0.1:40431, datanodeUuid=efced11e-bb85-45f7-82fa-86ebe1e567de, infoPort=35351, infoSecurePort=0, ipcPort=38425, storageInfo=lv=-57;cid=testClusterID;nsid=1343471445;c=1733480165363), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T10:16:06,062 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa 2024-12-06T10:16:06,065 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698/zookeeper_0, clientPort=63512, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T10:16:06,066 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=63512 2024-12-06T10:16:06,066 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:06,067 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:06,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:16:06,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741825_1001 (size=7) 2024-12-06T10:16:06,076 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe with version=8 2024-12-06T10:16:06,076 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:38219/user/jenkins/test-data/f58bdebe-36e7-c8f3-e9f0-1700685342d9/hbase-staging 2024-12-06T10:16:06,078 INFO [Time-limited test {}] client.ConnectionUtils(129): master/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:16:06,078 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:06,078 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:06,078 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:16:06,078 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:06,078 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:16:06,078 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:16:06,078 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:16:06,079 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:32989 2024-12-06T10:16:06,079 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:06,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:06,082 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:32989 connecting to ZooKeeper ensemble=127.0.0.1:63512 2024-12-06T10:16:06,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329890x0, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:16:06,088 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32989-0x10066d3eb960000 connected 2024-12-06T10:16:06,100 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:16:06,100 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:16:06,101 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:16:06,104 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32989 2024-12-06T10:16:06,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32989 2024-12-06T10:16:06,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32989 2024-12-06T10:16:06,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32989 2024-12-06T10:16:06,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32989 2024-12-06T10:16:06,112 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe, hbase.cluster.distributed=false 2024-12-06T10:16:06,128 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/552d6a33fa09:0 server-side Connection retries=45 2024-12-06T10:16:06,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:06,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:06,128 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T10:16:06,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T10:16:06,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T10:16:06,128 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T10:16:06,128 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T10:16:06,129 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45007 2024-12-06T10:16:06,129 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T10:16:06,131 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T10:16:06,132 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:06,134 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:06,136 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45007 connecting to ZooKeeper ensemble=127.0.0.1:63512 2024-12-06T10:16:06,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:450070x0, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T10:16:06,139 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:450070x0, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:16:06,140 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45007-0x10066d3eb960001 connected 2024-12-06T10:16:06,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:16:06,141 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T10:16:06,144 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45007 2024-12-06T10:16:06,144 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45007 2024-12-06T10:16:06,144 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45007 2024-12-06T10:16:06,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45007 2024-12-06T10:16:06,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45007 2024-12-06T10:16:06,149 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/552d6a33fa09,32989,1733480166077 2024-12-06T10:16:06,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:16:06,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:16:06,151 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/552d6a33fa09,32989,1733480166077 2024-12-06T10:16:06,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:16:06,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T10:16:06,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,156 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:16:06,156 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/552d6a33fa09,32989,1733480166077 from backup master directory 2024-12-06T10:16:06,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/552d6a33fa09,32989,1733480166077 2024-12-06T10:16:06,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:16:06,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T10:16:06,158 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] 
hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T10:16:06,158 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T10:16:06,158 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=552d6a33fa09,32989,1733480166077 2024-12-06T10:16:06,166 DEBUG [M:0;552d6a33fa09:32989 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;552d6a33fa09:32989 2024-12-06T10:16:06,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:16:06,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741826_1002 (size=42) 2024-12-06T10:16:06,174 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/hbase.id with ID: d5125007-746a-468f-977c-d2604ec07158 2024-12-06T10:16:06,182 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:06,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:16:06,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741827_1003 (size=196) 2024-12-06T10:16:06,194 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B 
(64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T10:16:06,195 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T10:16:06,196 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:16:06,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:16:06,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741828_1004 (size=1189) 2024-12-06T10:16:06,206 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store 2024-12-06T10:16:06,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:16:06,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741829_1005 (size=34) 2024-12-06T10:16:06,212 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:06,213 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1681): 
Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:16:06,213 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:06,213 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:06,213 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T10:16:06,213 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:06,213 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:06,213 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:16:06,214 WARN [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/.initializing 2024-12-06T10:16:06,214 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/WALs/552d6a33fa09,32989,1733480166077 2024-12-06T10:16:06,216 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C32989%2C1733480166077, suffix=, logDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/WALs/552d6a33fa09,32989,1733480166077, archiveDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/oldWALs, maxLogs=10 2024-12-06T10:16:06,216 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C32989%2C1733480166077.1733480166216 2024-12-06T10:16:06,220 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/WALs/552d6a33fa09,32989,1733480166077/552d6a33fa09%2C32989%2C1733480166077.1733480166216 2024-12-06T10:16:06,221 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35351:35351),(127.0.0.1/127.0.0.1:35129:35129)] 2024-12-06T10:16:06,221 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:16:06,221 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:06,221 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:06,221 DEBUG 
[master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:06,222 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:06,223 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T10:16:06,223 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:06,224 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:06,224 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:06,225 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T10:16:06,225 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:06,225 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
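The StoreOpener entries above enumerate the column families of the master's local 'master:store' region (info, proc, rs, state) together with their per-family settings such as version count, block size, bloom filter and data-block encoding. That descriptor is built internally by MasterRegion, but purely as an illustration of the same per-family knobs through the public client API (the 'example' table name is invented), a descriptor with comparable settings could be assembled like this:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  // Mirrors the 'info' family settings logged above: 3 versions, ROW_INDEX_V1
  // encoding, ROWCOL bloom filter, in-memory, 8 KB blocks.
  static TableDescriptor exampleDescriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))   // illustrative name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))           // defaults: 1 version, 64 KB blocks
        .build();
  }
}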
2024-12-06T10:16:06,225 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:06,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T10:16:06,226 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:06,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:16:06,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:06,227 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T10:16:06,227 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:06,228 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:16:06,228 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:06,228 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:06,230 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T10:16:06,231 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T10:16:06,233 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:16:06,233 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729113, jitterRate=-0.07288585603237152}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T10:16:06,234 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:16:06,234 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T10:16:06,237 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@391c7334, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:06,238 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T10:16:06,238 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T10:16:06,238 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T10:16:06,238 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
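At this point the master has opened its local store region and is starting the procedure executor, while the ZKWatcher/ZKUtil entries before and after show watchers being set on znodes such as /hbase/master and /hbase/running on the mini ZooKeeper ensemble at 127.0.0.1:63512. As a small, self-contained sketch (using the plain Apache ZooKeeper client rather than HBase's internal ZKUtil, and hard-coding the client port only because it appears in the log above), one could peek at the master znode like so:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZkPeekSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // 127.0.0.1:63512 is the client port MiniZooKeeperCluster reported above;
    // a real test would read it from the configuration instead of hard-coding it.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63512", 30000, (WatchedEvent e) -> {
      if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // Passing watch=true mirrors the "Set watcher on znode that does not yet exist"
    // lines in the log: the watcher fires once the master registers itself.
    Stat master = zk.exists("/hbase/master", true);
    System.out.println(master == null ? "no active master yet" : "master znode present");
    zk.close();
  }
}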
2024-12-06T10:16:06,238 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T10:16:06,239 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-06T10:16:06,239 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T10:16:06,240 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T10:16:06,241 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T10:16:06,242 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T10:16:06,242 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T10:16:06,243 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T10:16:06,244 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T10:16:06,244 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T10:16:06,245 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T10:16:06,246 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T10:16:06,246 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T10:16:06,247 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T10:16:06,248 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T10:16:06,249 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T10:16:06,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-06T10:16:06,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T10:16:06,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,251 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=552d6a33fa09,32989,1733480166077, sessionid=0x10066d3eb960000, setting cluster-up flag (Was=false) 2024-12-06T10:16:06,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,258 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T10:16:06,258 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,32989,1733480166077 2024-12-06T10:16:06,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,265 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T10:16:06,265 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=552d6a33fa09,32989,1733480166077 2024-12-06T10:16:06,267 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T10:16:06,268 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T10:16:06,268 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, 
RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T10:16:06,268 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 552d6a33fa09,32989,1733480166077 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T10:16:06,268 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:16:06,268 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:16:06,268 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:16:06,268 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/552d6a33fa09:0, corePoolSize=5, maxPoolSize=5 2024-12-06T10:16:06,268 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/552d6a33fa09:0, corePoolSize=10, maxPoolSize=10 2024-12-06T10:16:06,268 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,268 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:16:06,268 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,271 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733480196271 2024-12-06T10:16:06,271 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T10:16:06,272 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T10:16:06,272 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T10:16:06,272 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T10:16:06,272 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T10:16:06,272 INFO 
[master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T10:16:06,272 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:16:06,272 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T10:16:06,272 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,272 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T10:16:06,272 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T10:16:06,272 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T10:16:06,273 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T10:16:06,273 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T10:16:06,273 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480166273,5,FailOnTimeoutGroup] 2024-12-06T10:16:06,273 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:06,273 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480166273,5,FailOnTimeoutGroup] 2024-12-06T10:16:06,273 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,273 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T10:16:06,273 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
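The cleaner threads and chores above ("Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.") are all driven by HBase's ScheduledChore/ChoreService machinery. Below is a minimal sketch of that pattern, assuming the HBase 2.x ScheduledChore and ChoreService APIs; the chore name and period are made up for illustration, not taken from this test.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore can be cancelled; the master and
    // region server pass themselves as the stopper in the log above.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // A trivial chore; LogsCleaner/HFileCleaner follow the same pattern
    // with 600000 ms periods in the log above.
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };

    ChoreService service = new ChoreService("example");
    service.scheduleChore(chore);  // ChoreService logs the "is enabled" line seen above
    Thread.sleep(3500);
    service.shutdown();
  }
}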
2024-12-06T10:16:06,273 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:16:06,273 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:16:06,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741831_1007 (size=1039) 2024-12-06T10:16:06,280 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T10:16:06,280 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe 2024-12-06T10:16:06,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:16:06,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741832_1008 (size=32) 2024-12-06T10:16:06,286 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:06,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:16:06,288 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:16:06,288 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:06,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:06,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:16:06,290 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:16:06,290 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
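The descriptor printed above spells out the per-family settings used for hbase:meta (ROWCOL bloom filter, ROW_INDEX_V1 block encoding, in-memory caching, 8 KB blocks for 'info'). The following is a rough sketch of how an equivalent descriptor would be assembled with the HBase 2.x builder API; the table name demo:meta_like is hypothetical, since hbase:meta itself is created internally by InitMetaProcedure.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // Column family mirroring the 'info' family printed above:
    // VERSIONS=3, ROWCOL bloom, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "meta_like"))
        .setColumnFamily(info)
        .build();

    // toString() produces a descriptor dump in the same format as the log line.
    System.out.println(td);
  }
}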
2024-12-06T10:16:06,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:06,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:16:06,291 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:16:06,291 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:06,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:06,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740 2024-12-06T10:16:06,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740 2024-12-06T10:16:06,294 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
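The FlushLargeStoresPolicy message above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta descriptor, so the policy falls back to the region memstore flush size divided by the number of families (16.0 M here). A hedged sketch of the two places such a value could be supplied, site configuration or the table descriptor; the demo table name is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushPolicySketch {
  public static void main(String[] args) {
    // Cluster-wide default via site configuration.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);

    // Or per table, stored as a key/value in the table descriptor, which is
    // what the log message above says is missing for hbase:meta.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "flush_policy"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}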
2024-12-06T10:16:06,295 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:16:06,296 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:16:06,297 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742523, jitterRate=-0.05583372712135315}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:16:06,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:16:06,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:16:06,297 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:16:06,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:16:06,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:16:06,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:16:06,297 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T10:16:06,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:16:06,298 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T10:16:06,298 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T10:16:06,298 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T10:16:06,299 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T10:16:06,300 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T10:16:06,360 DEBUG [RS:0;552d6a33fa09:45007 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;552d6a33fa09:45007 2024-12-06T10:16:06,361 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1008): ClusterId : d5125007-746a-468f-977c-d2604ec07158 2024-12-06T10:16:06,361 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T10:16:06,363 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T10:16:06,363 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T10:16:06,365 DEBUG 
[RS:0;552d6a33fa09:45007 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T10:16:06,365 DEBUG [RS:0;552d6a33fa09:45007 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@685dc92f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:06,365 DEBUG [RS:0;552d6a33fa09:45007 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@506a53df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:16:06,365 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T10:16:06,365 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T10:16:06,365 DEBUG [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-06T10:16:06,366 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(3073): reportForDuty to master=552d6a33fa09,32989,1733480166077 with isa=552d6a33fa09/172.17.0.2:45007, startcode=1733480166127 2024-12-06T10:16:06,366 DEBUG [RS:0;552d6a33fa09:45007 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T10:16:06,368 INFO [RS-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57049, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T10:16:06,368 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32989 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 552d6a33fa09,45007,1733480166127 2024-12-06T10:16:06,368 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32989 {}] master.ServerManager(486): Registering regionserver=552d6a33fa09,45007,1733480166127 2024-12-06T10:16:06,369 DEBUG [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe 2024-12-06T10:16:06,370 DEBUG [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:46331 2024-12-06T10:16:06,370 DEBUG [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T10:16:06,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:16:06,371 DEBUG [RS:0;552d6a33fa09:45007 {}] zookeeper.ZKUtil(111): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/552d6a33fa09,45007,1733480166127 2024-12-06T10:16:06,371 WARN [RS:0;552d6a33fa09:45007 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
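The registration sequence above ends with the region server's ephemeral znode under /hbase/rs, which both the master's RegionServerTracker and the server itself watch. A small sketch of inspecting that znode with the plain ZooKeeper client, reusing the quorum 127.0.0.1:63512 and base znode /hbase from the log; these are test-local values.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class LiveServersSketch {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log above (test-local value).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63512", 30000, new Watcher() {
      @Override public void process(WatchedEvent event) {
        // RegionServerTracker reacts to NodeChildrenChanged on /hbase/rs similarly.
        System.out.println("event: " + event.getType() + " " + event.getPath());
      }
    });

    // Each live region server keeps an ephemeral child here, e.g.
    // /hbase/rs/552d6a33fa09,45007,1733480166127
    List<String> servers = zk.getChildren("/hbase/rs", true);
    System.out.println("live region servers: " + servers);
    zk.close();
  }
}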
2024-12-06T10:16:06,371 INFO [RS:0;552d6a33fa09:45007 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:16:06,371 DEBUG [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/552d6a33fa09,45007,1733480166127 2024-12-06T10:16:06,372 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [552d6a33fa09,45007,1733480166127] 2024-12-06T10:16:06,374 DEBUG [RS:0;552d6a33fa09:45007 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T10:16:06,374 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T10:16:06,376 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T10:16:06,376 INFO [RS:0;552d6a33fa09:45007 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T10:16:06,376 INFO [RS:0;552d6a33fa09:45007 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,376 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T10:16:06,377 INFO [RS:0;552d6a33fa09:45007 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
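The WAL setup above instantiates FSHLogProvider and later rolls files under the server's WALs directory with maxLogs=32. A brief sketch of the configuration knobs commonly used to pick the provider and cap retained logs; treat the exact key names as assumptions to be verified against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // "filesystem" selects the FSHLog-based provider instantiated in the log;
    // "asyncfs" would select the async WAL instead (key name assumed here).
    conf.set("hbase.wal.provider", "filesystem");
    conf.setInt("hbase.regionserver.maxlogs", 32);  // matches maxLogs=32 above

    System.out.println(conf.get("hbase.wal.provider"));
  }
}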
2024-12-06T10:16:06,377 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,377 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,377 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,377 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,377 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,377 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/552d6a33fa09:0, corePoolSize=2, maxPoolSize=2 2024-12-06T10:16:06,378 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,378 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,378 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,378 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,378 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/552d6a33fa09:0, corePoolSize=1, maxPoolSize=1 2024-12-06T10:16:06,378 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:16:06,378 DEBUG [RS:0;552d6a33fa09:45007 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/552d6a33fa09:0, corePoolSize=3, maxPoolSize=3 2024-12-06T10:16:06,378 INFO [RS:0;552d6a33fa09:45007 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,378 INFO [RS:0;552d6a33fa09:45007 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,378 INFO [RS:0;552d6a33fa09:45007 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,378 INFO [RS:0;552d6a33fa09:45007 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,378 INFO [RS:0;552d6a33fa09:45007 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,45007,1733480166127-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T10:16:06,392 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T10:16:06,392 INFO [RS:0;552d6a33fa09:45007 {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,45007,1733480166127-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,406 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.Replication(204): 552d6a33fa09,45007,1733480166127 started 2024-12-06T10:16:06,406 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1767): Serving as 552d6a33fa09,45007,1733480166127, RpcServer on 552d6a33fa09/172.17.0.2:45007, sessionid=0x10066d3eb960001 2024-12-06T10:16:06,406 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T10:16:06,406 DEBUG [RS:0;552d6a33fa09:45007 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 552d6a33fa09,45007,1733480166127 2024-12-06T10:16:06,406 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,45007,1733480166127' 2024-12-06T10:16:06,406 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T10:16:06,407 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T10:16:06,407 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T10:16:06,407 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T10:16:06,407 DEBUG [RS:0;552d6a33fa09:45007 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 552d6a33fa09,45007,1733480166127 2024-12-06T10:16:06,407 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '552d6a33fa09,45007,1733480166127' 2024-12-06T10:16:06,407 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T10:16:06,407 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T10:16:06,407 DEBUG [RS:0;552d6a33fa09:45007 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T10:16:06,407 INFO [RS:0;552d6a33fa09:45007 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T10:16:06,407 INFO [RS:0;552d6a33fa09:45007 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T10:16:06,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:16:06,450 WARN [552d6a33fa09:32989 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-12-06T10:16:06,509 INFO [RS:0;552d6a33fa09:45007 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C45007%2C1733480166127, suffix=, logDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/552d6a33fa09,45007,1733480166127, archiveDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/oldWALs, maxLogs=32 2024-12-06T10:16:06,510 INFO [RS:0;552d6a33fa09:45007 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45007%2C1733480166127.1733480166510 2024-12-06T10:16:06,521 INFO [RS:0;552d6a33fa09:45007 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/552d6a33fa09,45007,1733480166127/552d6a33fa09%2C45007%2C1733480166127.1733480166510 2024-12-06T10:16:06,521 DEBUG [RS:0;552d6a33fa09:45007 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35351:35351),(127.0.0.1/127.0.0.1:35129:35129)] 2024-12-06T10:16:06,700 DEBUG [552d6a33fa09:32989 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T10:16:06,701 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=552d6a33fa09,45007,1733480166127 2024-12-06T10:16:06,701 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,45007,1733480166127, state=OPENING 2024-12-06T10:16:06,703 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T10:16:06,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, 
quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:06,705 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=552d6a33fa09,45007,1733480166127}] 2024-12-06T10:16:06,705 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:16:06,705 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:16:06,857 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,45007,1733480166127 2024-12-06T10:16:06,857 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T10:16:06,859 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36344, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T10:16:06,862 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T10:16:06,862 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:16:06,864 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=552d6a33fa09%2C45007%2C1733480166127.meta, suffix=.meta, logDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/552d6a33fa09,45007,1733480166127, archiveDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/oldWALs, maxLogs=32 2024-12-06T10:16:06,864 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 552d6a33fa09%2C45007%2C1733480166127.meta.1733480166864.meta 2024-12-06T10:16:06,869 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/552d6a33fa09,45007,1733480166127/552d6a33fa09%2C45007%2C1733480166127.meta.1733480166864.meta 2024-12-06T10:16:06,869 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35351:35351),(127.0.0.1/127.0.0.1:35129:35129)] 2024-12-06T10:16:06,869 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:16:06,870 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null 
and priority 536870911 2024-12-06T10:16:06,870 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T10:16:06,870 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T10:16:06,870 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T10:16:06,870 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:06,870 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T10:16:06,870 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T10:16:06,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T10:16:06,872 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T10:16:06,872 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:06,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:06,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T10:16:06,873 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T10:16:06,873 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:06,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:06,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T10:16:06,874 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T10:16:06,874 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:06,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T10:16:06,875 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740 2024-12-06T10:16:06,876 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740 2024-12-06T10:16:06,878 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
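Before the region is opened for reads, the open path checks the region directory for recovered.edits files to replay ("Found 0 recovered edits file(s) under ..."). Here is a sketch of the same check done directly with the Hadoop FileSystem API, reusing the namenode address and region directory from the log; they are test-local paths.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RecoveredEditsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Namenode and region directory copied from the log above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46331"), conf);
    Path recoveredEdits = new Path(
        "/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/recovered.edits");

    if (fs.exists(recoveredEdits)) {
      // The region open path replays any edit files found here before serving reads.
      FileStatus[] files = fs.listStatus(recoveredEdits);
      System.out.println("recovered edits files: " + files.length);
    } else {
      System.out.println("no recovered.edits directory");
    }
    fs.close();
  }
}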
2024-12-06T10:16:06,879 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T10:16:06,879 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=754925, jitterRate=-0.04006418585777283}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T10:16:06,879 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T10:16:06,880 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733480166857 2024-12-06T10:16:06,882 DEBUG [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T10:16:06,882 INFO [RS_OPEN_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T10:16:06,882 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,45007,1733480166127 2024-12-06T10:16:06,883 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 552d6a33fa09,45007,1733480166127, state=OPEN 2024-12-06T10:16:06,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:16:06,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T10:16:06,887 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:16:06,887 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T10:16:06,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T10:16:06,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=552d6a33fa09,45007,1733480166127 in 183 msec 2024-12-06T10:16:06,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T10:16:06,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 590 msec 2024-12-06T10:16:06,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 624 msec 2024-12-06T10:16:06,892 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733480166892, completionTime=-1 2024-12-06T10:16:06,892 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T10:16:06,892 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T10:16:06,892 DEBUG [hconnection-0x406a12ef-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:06,894 INFO [RS-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36348, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:06,895 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T10:16:06,895 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733480226895 2024-12-06T10:16:06,895 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733480286895 2024-12-06T10:16:06,895 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec 2024-12-06T10:16:06,899 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32989,1733480166077-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,899 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32989,1733480166077-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,900 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32989,1733480166077-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,900 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-552d6a33fa09:32989, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,900 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T10:16:06,900 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
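At this point the master has one registered region server, the assignment manager has joined the cluster, and the TableNamespaceManager notices that hbase:namespace does not exist yet and schedules its creation. A client-side sketch for observing that state through the Admin API, assuming the test cluster's ZooKeeper quorum; the master creates the namespace table itself, a client can only check for it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");            // test-cluster values assumed
    conf.set("hbase.zookeeper.property.clientPort", "63512");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());

      boolean nsTable = admin.tableExists(TableName.valueOf("hbase:namespace"));
      System.out.println("hbase:namespace present: " + nsTable);
    }
  }
}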
2024-12-06T10:16:06,900 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T10:16:06,901 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T10:16:06,901 DEBUG [master/552d6a33fa09:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T10:16:06,902 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T10:16:06,902 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:06,903 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T10:16:06,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:16:06,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741835_1011 (size=358) 2024-12-06T10:16:06,910 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e23dcdaafd855359c69c28f3729df93f, NAME => 'hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe 2024-12-06T10:16:06,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:16:06,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741836_1012 (size=42) 2024-12-06T10:16:06,918 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:06,918 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing e23dcdaafd855359c69c28f3729df93f, disabling compactions & flushes 2024-12-06T10:16:06,918 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 2024-12-06T10:16:06,918 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 2024-12-06T10:16:06,918 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. after waiting 0 ms 2024-12-06T10:16:06,918 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 2024-12-06T10:16:06,918 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 2024-12-06T10:16:06,918 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for e23dcdaafd855359c69c28f3729df93f: 2024-12-06T10:16:06,919 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T10:16:06,919 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733480166919"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733480166919"}]},"ts":"1733480166919"} 2024-12-06T10:16:06,921 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T10:16:06,922 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T10:16:06,922 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480166922"}]},"ts":"1733480166922"} 2024-12-06T10:16:06,923 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T10:16:06,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e23dcdaafd855359c69c28f3729df93f, ASSIGN}] 2024-12-06T10:16:06,927 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e23dcdaafd855359c69c28f3729df93f, ASSIGN 2024-12-06T10:16:06,928 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=e23dcdaafd855359c69c28f3729df93f, ASSIGN; state=OFFLINE, location=552d6a33fa09,45007,1733480166127; forceNewPlan=false, retain=false 2024-12-06T10:16:07,078 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e23dcdaafd855359c69c28f3729df93f, regionState=OPENING, regionLocation=552d6a33fa09,45007,1733480166127 2024-12-06T10:16:07,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure e23dcdaafd855359c69c28f3729df93f, server=552d6a33fa09,45007,1733480166127}] 2024-12-06T10:16:07,233 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 552d6a33fa09,45007,1733480166127 2024-12-06T10:16:07,236 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 2024-12-06T10:16:07,236 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => e23dcdaafd855359c69c28f3729df93f, NAME => 'hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f.', STARTKEY => '', ENDKEY => ''} 2024-12-06T10:16:07,236 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace e23dcdaafd855359c69c28f3729df93f 2024-12-06T10:16:07,236 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T10:16:07,236 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for e23dcdaafd855359c69c28f3729df93f 2024-12-06T10:16:07,236 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for e23dcdaafd855359c69c28f3729df93f 2024-12-06T10:16:07,238 INFO [StoreOpener-e23dcdaafd855359c69c28f3729df93f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e23dcdaafd855359c69c28f3729df93f 2024-12-06T10:16:07,239 INFO [StoreOpener-e23dcdaafd855359c69c28f3729df93f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e23dcdaafd855359c69c28f3729df93f columnFamilyName info 2024-12-06T10:16:07,239 DEBUG [StoreOpener-e23dcdaafd855359c69c28f3729df93f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T10:16:07,239 INFO [StoreOpener-e23dcdaafd855359c69c28f3729df93f-1 {}] regionserver.HStore(327): Store=e23dcdaafd855359c69c28f3729df93f/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T10:16:07,240 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/namespace/e23dcdaafd855359c69c28f3729df93f 2024-12-06T10:16:07,240 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/namespace/e23dcdaafd855359c69c28f3729df93f 2024-12-06T10:16:07,242 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for e23dcdaafd855359c69c28f3729df93f 2024-12-06T10:16:07,244 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/namespace/e23dcdaafd855359c69c28f3729df93f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T10:16:07,245 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened e23dcdaafd855359c69c28f3729df93f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=749833, jitterRate=-0.04653927683830261}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T10:16:07,245 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for e23dcdaafd855359c69c28f3729df93f: 2024-12-06T10:16:07,246 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f., pid=6, masterSystemTime=1733480167232 2024-12-06T10:16:07,247 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 2024-12-06T10:16:07,247 INFO [RS_OPEN_PRIORITY_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 
2024-12-06T10:16:07,248 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e23dcdaafd855359c69c28f3729df93f, regionState=OPEN, openSeqNum=2, regionLocation=552d6a33fa09,45007,1733480166127 2024-12-06T10:16:07,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T10:16:07,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure e23dcdaafd855359c69c28f3729df93f, server=552d6a33fa09,45007,1733480166127 in 169 msec 2024-12-06T10:16:07,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T10:16:07,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=e23dcdaafd855359c69c28f3729df93f, ASSIGN in 325 msec 2024-12-06T10:16:07,253 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T10:16:07,253 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733480167253"}]},"ts":"1733480167253"} 2024-12-06T10:16:07,254 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T10:16:07,256 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T10:16:07,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 356 msec 2024-12-06T10:16:07,302 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T10:16:07,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:16:07,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:07,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:07,308 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T10:16:07,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:16:07,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 9 msec 2024-12-06T10:16:07,320 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T10:16:07,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T10:16:07,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 8 msec 2024-12-06T10:16:07,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T10:16:07,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T10:16:07,336 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.178sec 2024-12-06T10:16:07,336 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T10:16:07,336 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T10:16:07,336 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T10:16:07,336 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T10:16:07,337 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T10:16:07,337 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32989,1733480166077-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T10:16:07,337 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32989,1733480166077-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T10:16:07,338 DEBUG [master/552d6a33fa09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T10:16:07,338 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T10:16:07,339 INFO [master/552d6a33fa09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=552d6a33fa09,32989,1733480166077-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T10:16:07,350 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x444ab434 to 127.0.0.1:63512 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c77d1ee 2024-12-06T10:16:07,353 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3de0005b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T10:16:07,355 DEBUG [hconnection-0x7330f408-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T10:16:07,356 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T10:16:07,358 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=552d6a33fa09,32989,1733480166077 2024-12-06T10:16:07,358 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T10:16:07,360 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T10:16:07,360 INFO [Time-limited test {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T10:16:07,362 INFO [Time-limited test {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/test.com,8080,1, archiveDir=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/oldWALs, maxLogs=32 2024-12-06T10:16:07,363 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733480167362 2024-12-06T10:16:07,367 INFO [Time-limited test {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/test.com,8080,1/test.com%2C8080%2C1.1733480167362 2024-12-06T10:16:07,367 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35129:35129),(127.0.0.1/127.0.0.1:35351:35351)] 2024-12-06T10:16:07,367 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733480167367 2024-12-06T10:16:07,374 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/test.com,8080,1/test.com%2C8080%2C1.1733480167362 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/test.com,8080,1/test.com%2C8080%2C1.1733480167367 2024-12-06T10:16:07,374 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35351:35351),(127.0.0.1/127.0.0.1:35129:35129)] 2024-12-06T10:16:07,374 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/test.com,8080,1/test.com%2C8080%2C1.1733480167362 is not closed yet, will try archiving it next time 2024-12-06T10:16:07,375 DEBUG [WAL-Shutdown-0 {}] 
wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/test.com,8080,1 2024-12-06T10:16:07,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741837_1013 (size=93) 2024-12-06T10:16:07,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741838_1014 (size=93) 2024-12-06T10:16:07,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741838_1014 (size=93) 2024-12-06T10:16:07,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741837_1013 (size=93) 2024-12-06T10:16:07,378 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/test.com,8080,1/test.com%2C8080%2C1.1733480167362 to hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/oldWALs/test.com%2C8080%2C1.1733480167362 2024-12-06T10:16:07,381 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/oldWALs 2024-12-06T10:16:07,381 INFO [Time-limited test {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733480167367) 2024-12-06T10:16:07,381 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T10:16:07,381 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x444ab434 to 127.0.0.1:63512 2024-12-06T10:16:07,381 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:16:07,381 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T10:16:07,381 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1406856489, stopped=false 2024-12-06T10:16:07,381 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=552d6a33fa09,32989,1733480166077 2024-12-06T10:16:07,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:16:07,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T10:16:07,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:07,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:07,383 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T10:16:07,384 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 
2024-12-06T10:16:07,384 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T10:16:07,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:16:07,384 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,45007,1733480166127' ***** 2024-12-06T10:16:07,384 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T10:16:07,384 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T10:16:07,385 INFO [RS:0;552d6a33fa09:45007 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T10:16:07,385 INFO [RS:0;552d6a33fa09:45007 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T10:16:07,385 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(3579): Received CLOSE for e23dcdaafd855359c69c28f3729df93f 2024-12-06T10:16:07,385 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T10:16:07,385 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,45007,1733480166127 2024-12-06T10:16:07,385 DEBUG [RS:0;552d6a33fa09:45007 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:16:07,385 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T10:16:07,385 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T10:16:07,385 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T10:16:07,385 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T10:16:07,385 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing e23dcdaafd855359c69c28f3729df93f, disabling compactions & flushes 2024-12-06T10:16:07,385 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 2024-12-06T10:16:07,385 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 2024-12-06T10:16:07,385 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. after waiting 0 ms 2024-12-06T10:16:07,385 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 
2024-12-06T10:16:07,385 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-06T10:16:07,385 DEBUG [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1603): Online Regions={e23dcdaafd855359c69c28f3729df93f=hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f., 1588230740=hbase:meta,,1.1588230740} 2024-12-06T10:16:07,385 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing e23dcdaafd855359c69c28f3729df93f 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T10:16:07,385 DEBUG [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, e23dcdaafd855359c69c28f3729df93f 2024-12-06T10:16:07,385 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T10:16:07,385 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T10:16:07,385 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T10:16:07,385 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T10:16:07,385 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T10:16:07,386 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=1.23 KB heapSize=2.87 KB 2024-12-06T10:16:07,401 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/.tmp/info/0c1d34c85ee143ae979aaccbb1c66bb9 is 143, key is hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f./info:regioninfo/1733480167248/Put/seqid=0 2024-12-06T10:16:07,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741839_1015 (size=6595) 2024-12-06T10:16:07,407 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/namespace/e23dcdaafd855359c69c28f3729df93f/.tmp/info/bd1a6a2bcfe4450eb6d6a73683f8b95b is 45, key is default/info:d/1733480167312/Put/seqid=0 2024-12-06T10:16:07,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741839_1015 (size=6595) 2024-12-06T10:16:07,407 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.14 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/.tmp/info/0c1d34c85ee143ae979aaccbb1c66bb9 2024-12-06T10:16:07,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40431 is added to blk_1073741840_1016 (size=5037) 2024-12-06T10:16:07,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741840_1016 (size=5037) 2024-12-06T10:16:07,412 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/namespace/e23dcdaafd855359c69c28f3729df93f/.tmp/info/bd1a6a2bcfe4450eb6d6a73683f8b95b 2024-12-06T10:16:07,418 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/namespace/e23dcdaafd855359c69c28f3729df93f/.tmp/info/bd1a6a2bcfe4450eb6d6a73683f8b95b as hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/namespace/e23dcdaafd855359c69c28f3729df93f/info/bd1a6a2bcfe4450eb6d6a73683f8b95b 2024-12-06T10:16:07,422 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/namespace/e23dcdaafd855359c69c28f3729df93f/info/bd1a6a2bcfe4450eb6d6a73683f8b95b, entries=2, sequenceid=6, filesize=4.9 K 2024-12-06T10:16:07,423 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for e23dcdaafd855359c69c28f3729df93f in 38ms, sequenceid=6, compaction requested=false 2024-12-06T10:16:07,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45977/user/jenkins/test-data/7a7fc2c0-3104-383a-3123-f3219c328565/WALs/552d6a33fa09,39435,1733479918098/552d6a33fa09%2C39435%2C1733479918098.meta.1733479919089.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor107.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T10:16:07,423 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-06T10:16:07,427 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/namespace/e23dcdaafd855359c69c28f3729df93f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T10:16:07,428 INFO [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 2024-12-06T10:16:07,428 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for e23dcdaafd855359c69c28f3729df93f: 2024-12-06T10:16:07,428 DEBUG [RS_CLOSE_REGION-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733480166900.e23dcdaafd855359c69c28f3729df93f. 2024-12-06T10:16:07,434 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/.tmp/table/15381d56f95044638b5abb3594a8414f is 51, key is hbase:namespace/table:state/1733480167253/Put/seqid=0 2024-12-06T10:16:07,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741841_1017 (size=5242) 2024-12-06T10:16:07,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741841_1017 (size=5242) 2024-12-06T10:16:07,439 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=94 B at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/.tmp/table/15381d56f95044638b5abb3594a8414f 2024-12-06T10:16:07,444 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/.tmp/info/0c1d34c85ee143ae979aaccbb1c66bb9 as hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/info/0c1d34c85ee143ae979aaccbb1c66bb9 2024-12-06T10:16:07,449 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/info/0c1d34c85ee143ae979aaccbb1c66bb9, entries=10, 
sequenceid=9, filesize=6.4 K 2024-12-06T10:16:07,449 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/.tmp/table/15381d56f95044638b5abb3594a8414f as hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/table/15381d56f95044638b5abb3594a8414f 2024-12-06T10:16:07,454 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/table/15381d56f95044638b5abb3594a8414f, entries=2, sequenceid=9, filesize=5.1 K 2024-12-06T10:16:07,455 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~1.23 KB/1264, heapSize ~2.59 KB/2648, currentSize=0 B/0 for 1588230740 in 70ms, sequenceid=9, compaction requested=false 2024-12-06T10:16:07,455 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T10:16:07,459 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/data/hbase/meta/1588230740/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-12-06T10:16:07,460 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T10:16:07,460 INFO [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T10:16:07,460 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T10:16:07,460 DEBUG [RS_CLOSE_META-regionserver/552d6a33fa09:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T10:16:07,585 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,45007,1733480166127; all regions closed. 
2024-12-06T10:16:07,586 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/552d6a33fa09,45007,1733480166127 2024-12-06T10:16:07,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741834_1010 (size=2484) 2024-12-06T10:16:07,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741834_1010 (size=2484) 2024-12-06T10:16:07,590 DEBUG [RS:0;552d6a33fa09:45007 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/oldWALs 2024-12-06T10:16:07,590 INFO [RS:0;552d6a33fa09:45007 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 552d6a33fa09%2C45007%2C1733480166127.meta:.meta(num 1733480166864) 2024-12-06T10:16:07,591 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/WALs/552d6a33fa09,45007,1733480166127 2024-12-06T10:16:07,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741833_1009 (size=1414) 2024-12-06T10:16:07,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741833_1009 (size=1414) 2024-12-06T10:16:07,596 DEBUG [RS:0;552d6a33fa09:45007 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/oldWALs 2024-12-06T10:16:07,596 INFO [RS:0;552d6a33fa09:45007 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 552d6a33fa09%2C45007%2C1733480166127:(num 1733480166510) 2024-12-06T10:16:07,596 DEBUG [RS:0;552d6a33fa09:45007 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:16:07,596 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T10:16:07,597 INFO [RS:0;552d6a33fa09:45007 {}] hbase.ChoreService(370): Chore service for: regionserver/552d6a33fa09:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T10:16:07,597 INFO [regionserver/552d6a33fa09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T10:16:07,597 INFO [RS:0;552d6a33fa09:45007 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45007 2024-12-06T10:16:07,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/552d6a33fa09,45007,1733480166127 2024-12-06T10:16:07,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T10:16:07,600 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [552d6a33fa09,45007,1733480166127] 2024-12-06T10:16:07,600 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 552d6a33fa09,45007,1733480166127; numProcessing=1 2024-12-06T10:16:07,601 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/552d6a33fa09,45007,1733480166127 already deleted, retry=false 2024-12-06T10:16:07,601 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 552d6a33fa09,45007,1733480166127 expired; onlineServers=0 2024-12-06T10:16:07,601 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '552d6a33fa09,32989,1733480166077' ***** 2024-12-06T10:16:07,601 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T10:16:07,601 DEBUG [M:0;552d6a33fa09:32989 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6638faff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=552d6a33fa09/172.17.0.2:0 2024-12-06T10:16:07,601 INFO [M:0;552d6a33fa09:32989 {}] regionserver.HRegionServer(1224): stopping server 552d6a33fa09,32989,1733480166077 2024-12-06T10:16:07,601 INFO [M:0;552d6a33fa09:32989 {}] regionserver.HRegionServer(1250): stopping server 552d6a33fa09,32989,1733480166077; all regions closed. 2024-12-06T10:16:07,601 DEBUG [M:0;552d6a33fa09:32989 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T10:16:07,601 DEBUG [M:0;552d6a33fa09:32989 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T10:16:07,602 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T10:16:07,602 DEBUG [M:0;552d6a33fa09:32989 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T10:16:07,602 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480166273 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.large.0-1733480166273,5,FailOnTimeoutGroup] 2024-12-06T10:16:07,602 DEBUG [master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480166273 {}] cleaner.HFileCleaner(306): Exit Thread[master/552d6a33fa09:0:becomeActiveMaster-HFileCleaner.small.0-1733480166273,5,FailOnTimeoutGroup] 2024-12-06T10:16:07,602 INFO [M:0;552d6a33fa09:32989 {}] hbase.ChoreService(370): Chore service for: master/552d6a33fa09:0 had [] on shutdown 2024-12-06T10:16:07,602 DEBUG [M:0;552d6a33fa09:32989 {}] master.HMaster(1733): Stopping service threads 2024-12-06T10:16:07,602 INFO [M:0;552d6a33fa09:32989 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T10:16:07,602 INFO [M:0;552d6a33fa09:32989 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T10:16:07,602 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T10:16:07,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T10:16:07,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T10:16:07,603 DEBUG [M:0;552d6a33fa09:32989 {}] zookeeper.ZKUtil(347): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T10:16:07,603 WARN [M:0;552d6a33fa09:32989 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T10:16:07,603 INFO [M:0;552d6a33fa09:32989 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T10:16:07,603 INFO [M:0;552d6a33fa09:32989 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T10:16:07,603 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T10:16:07,603 DEBUG [M:0;552d6a33fa09:32989 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T10:16:07,603 INFO [M:0;552d6a33fa09:32989 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:07,603 DEBUG [M:0;552d6a33fa09:32989 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:07,603 DEBUG [M:0;552d6a33fa09:32989 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-06T10:16:07,603 DEBUG [M:0;552d6a33fa09:32989 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:07,603 INFO [M:0;552d6a33fa09:32989 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=25.32 KB heapSize=32.31 KB 2024-12-06T10:16:07,619 DEBUG [M:0;552d6a33fa09:32989 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba22393c72684987b92791ad74bff78d is 82, key is hbase:meta,,1/info:regioninfo/1733480166882/Put/seqid=0 2024-12-06T10:16:07,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741842_1018 (size=5672) 2024-12-06T10:16:07,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741842_1018 (size=5672) 2024-12-06T10:16:07,624 INFO [M:0;552d6a33fa09:32989 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba22393c72684987b92791ad74bff78d 2024-12-06T10:16:07,642 DEBUG [M:0;552d6a33fa09:32989 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a9345336f43348e5a1bbf345c4c93ce1 is 696, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733480167257/Put/seqid=0 2024-12-06T10:16:07,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741843_1019 (size=6626) 2024-12-06T10:16:07,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741843_1019 (size=6626) 2024-12-06T10:16:07,646 INFO [M:0;552d6a33fa09:32989 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.72 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a9345336f43348e5a1bbf345c4c93ce1 2024-12-06T10:16:07,664 DEBUG [M:0;552d6a33fa09:32989 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/88fe364b494a48889c79362d457b1201 is 69, key is 552d6a33fa09,45007,1733480166127/rs:state/1733480166368/Put/seqid=0 2024-12-06T10:16:07,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741844_1020 (size=5156) 2024-12-06T10:16:07,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741844_1020 (size=5156) 2024-12-06T10:16:07,669 INFO [M:0;552d6a33fa09:32989 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=70 (bloomFilter=true), 
to=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/88fe364b494a48889c79362d457b1201 2024-12-06T10:16:07,686 DEBUG [M:0;552d6a33fa09:32989 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/03c9057a652a4816b2af1f4f5578e5a6 is 52, key is load_balancer_on/state:d/1733480167359/Put/seqid=0 2024-12-06T10:16:07,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741845_1021 (size=5056) 2024-12-06T10:16:07,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741845_1021 (size=5056) 2024-12-06T10:16:07,691 INFO [M:0;552d6a33fa09:32989 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/03c9057a652a4816b2af1f4f5578e5a6 2024-12-06T10:16:07,695 DEBUG [M:0;552d6a33fa09:32989 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba22393c72684987b92791ad74bff78d as hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ba22393c72684987b92791ad74bff78d 2024-12-06T10:16:07,699 INFO [M:0;552d6a33fa09:32989 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ba22393c72684987b92791ad74bff78d, entries=8, sequenceid=70, filesize=5.5 K 2024-12-06T10:16:07,699 DEBUG [M:0;552d6a33fa09:32989 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a9345336f43348e5a1bbf345c4c93ce1 as hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a9345336f43348e5a1bbf345c4c93ce1 2024-12-06T10:16:07,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:16:07,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45007-0x10066d3eb960001, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:16:07,700 INFO [RS:0;552d6a33fa09:45007 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,45007,1733480166127; zookeeper connection closed. 
2024-12-06T10:16:07,700 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4dc5c1df {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4dc5c1df 2024-12-06T10:16:07,701 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T10:16:07,704 INFO [M:0;552d6a33fa09:32989 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a9345336f43348e5a1bbf345c4c93ce1, entries=8, sequenceid=70, filesize=6.5 K 2024-12-06T10:16:07,704 DEBUG [M:0;552d6a33fa09:32989 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/88fe364b494a48889c79362d457b1201 as hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/88fe364b494a48889c79362d457b1201 2024-12-06T10:16:07,708 INFO [M:0;552d6a33fa09:32989 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/88fe364b494a48889c79362d457b1201, entries=1, sequenceid=70, filesize=5.0 K 2024-12-06T10:16:07,708 DEBUG [M:0;552d6a33fa09:32989 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/03c9057a652a4816b2af1f4f5578e5a6 as hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/03c9057a652a4816b2af1f4f5578e5a6 2024-12-06T10:16:07,712 INFO [M:0;552d6a33fa09:32989 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46331/user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/03c9057a652a4816b2af1f4f5578e5a6, entries=1, sequenceid=70, filesize=4.9 K 2024-12-06T10:16:07,713 INFO [M:0;552d6a33fa09:32989 {}] regionserver.HRegion(3040): Finished flush of dataSize ~25.32 KB/25929, heapSize ~32.25 KB/33024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 109ms, sequenceid=70, compaction requested=false 2024-12-06T10:16:07,714 INFO [M:0;552d6a33fa09:32989 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T10:16:07,714 DEBUG [M:0;552d6a33fa09:32989 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T10:16:07,714 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/df33eee8-b1da-4696-9315-1aaac3271efe/MasterData/WALs/552d6a33fa09,32989,1733480166077 2024-12-06T10:16:07,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35931 is added to blk_1073741830_1006 (size=31030) 2024-12-06T10:16:07,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40431 is added to blk_1073741830_1006 (size=31030) 2024-12-06T10:16:07,717 INFO [M:0;552d6a33fa09:32989 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-12-06T10:16:07,717 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-06T10:16:07,717 INFO [M:0;552d6a33fa09:32989 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:32989 2024-12-06T10:16:07,718 DEBUG [M:0;552d6a33fa09:32989 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/552d6a33fa09,32989,1733480166077 already deleted, retry=false 2024-12-06T10:16:07,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:16:07,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32989-0x10066d3eb960000, quorum=127.0.0.1:63512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T10:16:07,820 INFO [M:0;552d6a33fa09:32989 {}] regionserver.HRegionServer(1307): Exiting; stopping=552d6a33fa09,32989,1733480166077; zookeeper connection closed. 2024-12-06T10:16:07,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d43e104{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:16:07,823 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@660987f3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:16:07,823 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:16:07,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12527899{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:16:07,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49726ed6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/hadoop.log.dir/,STOPPED} 2024-12-06T10:16:07,824 WARN [BP-1836740515-172.17.0.2-1733480165363 heartbeating to localhost/127.0.0.1:46331 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:16:07,825 WARN [BP-1836740515-172.17.0.2-1733480165363 heartbeating to localhost/127.0.0.1:46331 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1836740515-172.17.0.2-1733480165363 (Datanode Uuid efced11e-bb85-45f7-82fa-86ebe1e567de) service to localhost/127.0.0.1:46331 2024-12-06T10:16:07,825 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:16:07,825 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:16:07,825 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698/dfs/data/data3/current/BP-1836740515-172.17.0.2-1733480165363 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:16:07,825 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698/dfs/data/data4/current/BP-1836740515-172.17.0.2-1733480165363 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:16:07,825 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:16:07,827 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@66e0ac38{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T10:16:07,827 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@67c014bf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:16:07,827 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:16:07,828 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3cc77b22{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:16:07,828 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77481e88{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/hadoop.log.dir/,STOPPED} 2024-12-06T10:16:07,829 WARN [BP-1836740515-172.17.0.2-1733480165363 heartbeating to localhost/127.0.0.1:46331 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T10:16:07,829 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T10:16:07,829 WARN [BP-1836740515-172.17.0.2-1733480165363 heartbeating to localhost/127.0.0.1:46331 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1836740515-172.17.0.2-1733480165363 (Datanode Uuid 34353ccf-ee75-4930-8db0-f3dd99224883) service to localhost/127.0.0.1:46331 2024-12-06T10:16:07,829 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T10:16:07,829 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698/dfs/data/data1/current/BP-1836740515-172.17.0.2-1733480165363 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:16:07,830 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/cluster_1d671e7a-800e-9ad0-e47a-79a6d003e698/dfs/data/data2/current/BP-1836740515-172.17.0.2-1733480165363 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T10:16:07,830 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T10:16:07,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25079c3d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T10:16:07,838 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64cf7e05{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T10:16:07,838 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T10:16:07,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@571fe688{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T10:16:07,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16cb9ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b04583cf-4c48-8e14-5e9e-1959f2e192fa/hadoop.log.dir/,STOPPED} 2024-12-06T10:16:07,846 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T10:16:07,866 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-06T10:16:07,874 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=147 (was 125) - Thread LEAK? -, OpenFileDescriptor=519 (was 496) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=145 (was 114) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6446 (was 6456)