2024-12-01 18:13:13,575 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc 2024-12-01 18:13:13,591 main DEBUG Took 0.013327 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-01 18:13:13,591 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-01 18:13:13,592 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-01 18:13:13,593 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-01 18:13:13,595 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,603 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-01 18:13:13,618 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,620 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,621 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,621 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,622 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,622 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,623 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,624 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,624 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,625 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,626 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,626 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,627 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,627 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-01 18:13:13,628 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,628 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,629 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,629 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,630 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,630 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,631 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,631 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,632 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,632 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:13:13,633 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,633 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-01 18:13:13,635 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:13:13,637 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-01 18:13:13,641 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-01 18:13:13,642 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-01 18:13:13,643 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-01 18:13:13,644 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-01 18:13:13,656 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-01 18:13:13,659 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-01 18:13:13,662 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-01 18:13:13,662 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-01 18:13:13,663 main DEBUG createAppenders(={Console}) 2024-12-01 18:13:13,663 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc initialized 2024-12-01 18:13:13,664 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc 2024-12-01 18:13:13,664 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc OK. 2024-12-01 18:13:13,665 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-01 18:13:13,665 main DEBUG OutputStream closed 2024-12-01 18:13:13,666 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-01 18:13:13,666 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-01 18:13:13,667 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@53ce1329 OK 2024-12-01 18:13:13,756 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-01 18:13:13,758 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-01 18:13:13,759 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-01 18:13:13,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-01 18:13:13,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-01 18:13:13,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-01 18:13:13,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-01 18:13:13,762 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-01 18:13:13,762 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-01 18:13:13,763 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-01 18:13:13,763 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-01 18:13:13,763 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-01 18:13:13,763 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-01 18:13:13,764 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-01 18:13:13,764 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-01 18:13:13,764 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-01 18:13:13,765 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-01 18:13:13,765 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-01 18:13:13,768 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-01 18:13:13,768 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-01 18:13:13,768 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-01 18:13:13,769 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-01T18:13:14,096 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a 2024-12-01 18:13:14,101 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-01 18:13:14,101 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-01T18:13:14,115 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-01T18:13:14,155 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=197, ProcessCount=11, AvailableMemoryMB=3702 2024-12-01T18:13:14,158 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T18:13:14,161 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6, deleteOnExit=true 2024-12-01T18:13:14,161 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-01T18:13:14,162 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/test.cache.data in system properties and HBase conf 2024-12-01T18:13:14,162 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T18:13:14,163 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/hadoop.log.dir in system properties and HBase conf 2024-12-01T18:13:14,164 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T18:13:14,164 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T18:13:14,164 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-01T18:13:14,266 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-01T18:13:14,378 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-01T18:13:14,383 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:13:14,384 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:13:14,385 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T18:13:14,385 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:13:14,386 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T18:13:14,386 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T18:13:14,387 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:13:14,387 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:13:14,388 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T18:13:14,388 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/nfs.dump.dir in system properties and HBase conf 2024-12-01T18:13:14,388 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/java.io.tmpdir in system properties and HBase conf 2024-12-01T18:13:14,389 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:13:14,389 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T18:13:14,389 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T18:13:14,905 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:13:15,240 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-01T18:13:15,326 INFO [Time-limited test {}] log.Log(170): Logging initialized @2618ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-01T18:13:15,418 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:13:15,498 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:13:15,527 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:13:15,527 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:13:15,529 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:13:15,547 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:13:15,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@88aab13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:13:15,551 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74468826{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:13:15,778 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5682c4d1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/java.io.tmpdir/jetty-localhost-36999-hadoop-hdfs-3_4_1-tests_jar-_-any-14799350332033253840/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:13:15,786 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ff1a6c1{HTTP/1.1, (http/1.1)}{localhost:36999} 2024-12-01T18:13:15,786 INFO [Time-limited test {}] server.Server(415): Started @3079ms 2024-12-01T18:13:15,820 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:13:16,260 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:13:16,268 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:13:16,269 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:13:16,270 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:13:16,270 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:13:16,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3cf7922e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:13:16,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2276bd44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:13:16,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c4b1b4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/java.io.tmpdir/jetty-localhost-37209-hadoop-hdfs-3_4_1-tests_jar-_-any-14816819527007114651/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:13:16,394 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9fb4bbe{HTTP/1.1, (http/1.1)}{localhost:37209} 2024-12-01T18:13:16,395 INFO [Time-limited test {}] server.Server(415): Started @3688ms 2024-12-01T18:13:16,453 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:13:16,581 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:13:16,589 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:13:16,592 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:13:16,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:13:16,593 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T18:13:16,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@670df016{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:13:16,597 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4debea22{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:13:16,760 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c482eac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/java.io.tmpdir/jetty-localhost-34673-hadoop-hdfs-3_4_1-tests_jar-_-any-14162598721379380590/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:13:16,761 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@408e1d66{HTTP/1.1, (http/1.1)}{localhost:34673} 2024-12-01T18:13:16,761 INFO [Time-limited test {}] server.Server(415): Started @4054ms 2024-12-01T18:13:16,765 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-01T18:13:16,964 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6/dfs/data/data3/current/BP-1507522640-172.17.0.2-1733076794999/current, will proceed with Du for space computation calculation, 2024-12-01T18:13:16,964 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6/dfs/data/data4/current/BP-1507522640-172.17.0.2-1733076794999/current, will proceed with Du for space computation calculation, 2024-12-01T18:13:16,964 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6/dfs/data/data1/current/BP-1507522640-172.17.0.2-1733076794999/current, will proceed with Du for space computation calculation, 2024-12-01T18:13:16,968 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6/dfs/data/data2/current/BP-1507522640-172.17.0.2-1733076794999/current, will proceed with Du for space computation calculation, 2024-12-01T18:13:17,044 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:13:17,044 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:13:17,115 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x694be0dcfed8ccbc with lease ID 0xe2d7b64ed465f9c6: Processing first storage report for DS-8282ea61-5623-4592-b32c-ccc6ebc71209 from datanode DatanodeRegistration(127.0.0.1:38223, datanodeUuid=acb88787-459b-4d82-951e-73e255f8eccb, infoPort=33153, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=902554889;c=1733076794999) 2024-12-01T18:13:17,117 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x694be0dcfed8ccbc with lease ID 0xe2d7b64ed465f9c6: from storage DS-8282ea61-5623-4592-b32c-ccc6ebc71209 node DatanodeRegistration(127.0.0.1:38223, datanodeUuid=acb88787-459b-4d82-951e-73e255f8eccb, infoPort=33153, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=902554889;c=1733076794999), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-01T18:13:17,117 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x694670ffb615d31e with lease ID 0xe2d7b64ed465f9c7: Processing first storage report for DS-21d65a05-a647-4fad-a102-f910c8d693c8 from datanode DatanodeRegistration(127.0.0.1:34749, datanodeUuid=54747de7-1696-43b2-a754-56ff5599f560, infoPort=35333, infoSecurePort=0, ipcPort=43273, storageInfo=lv=-57;cid=testClusterID;nsid=902554889;c=1733076794999) 2024-12-01T18:13:17,118 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x694670ffb615d31e with lease ID 0xe2d7b64ed465f9c7: from storage DS-21d65a05-a647-4fad-a102-f910c8d693c8 node DatanodeRegistration(127.0.0.1:34749, datanodeUuid=54747de7-1696-43b2-a754-56ff5599f560, infoPort=35333, infoSecurePort=0, ipcPort=43273, storageInfo=lv=-57;cid=testClusterID;nsid=902554889;c=1733076794999), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T18:13:17,118 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x694be0dcfed8ccbc with lease ID 0xe2d7b64ed465f9c6: Processing first storage report for DS-e51c73b9-5778-4dc1-a53b-83dffd040e84 from datanode DatanodeRegistration(127.0.0.1:38223, datanodeUuid=acb88787-459b-4d82-951e-73e255f8eccb, infoPort=33153, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=902554889;c=1733076794999) 2024-12-01T18:13:17,118 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x694be0dcfed8ccbc with lease ID 0xe2d7b64ed465f9c6: from storage DS-e51c73b9-5778-4dc1-a53b-83dffd040e84 node DatanodeRegistration(127.0.0.1:38223, datanodeUuid=acb88787-459b-4d82-951e-73e255f8eccb, infoPort=33153, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=902554889;c=1733076794999), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:13:17,119 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x694670ffb615d31e with lease ID 0xe2d7b64ed465f9c7: Processing first storage report for DS-e82faf36-75ad-49ec-9d6d-b4c7c7040479 from datanode DatanodeRegistration(127.0.0.1:34749, datanodeUuid=54747de7-1696-43b2-a754-56ff5599f560, infoPort=35333, infoSecurePort=0, ipcPort=43273, storageInfo=lv=-57;cid=testClusterID;nsid=902554889;c=1733076794999) 2024-12-01T18:13:17,119 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x694670ffb615d31e with lease ID 0xe2d7b64ed465f9c7: from storage DS-e82faf36-75ad-49ec-9d6d-b4c7c7040479 node DatanodeRegistration(127.0.0.1:34749, datanodeUuid=54747de7-1696-43b2-a754-56ff5599f560, infoPort=35333, infoSecurePort=0, ipcPort=43273, storageInfo=lv=-57;cid=testClusterID;nsid=902554889;c=1733076794999), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:13:17,234 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a 2024-12-01T18:13:17,313 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6/zookeeper_0, clientPort=56284, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T18:13:17,323 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=56284 2024-12-01T18:13:17,333 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:13:17,335 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:13:17,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:13:17,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:13:17,981 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12 with version=8 2024-12-01T18:13:17,981 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/hbase-staging 2024-12-01T18:13:18,107 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-01T18:13:18,381 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:13:18,401 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:13:18,401 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:13:18,402 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:13:18,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:13:18,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:13:18,545 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:13:18,605 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-01T18:13:18,614 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-01T18:13:18,617 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:13:18,644 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 32016 (auto-detected) 2024-12-01T18:13:18,645 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-01T18:13:18,665 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41393 2024-12-01T18:13:18,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:13:18,676 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:13:18,689 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:41393 connecting to ZooKeeper ensemble=127.0.0.1:56284 2024-12-01T18:13:18,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:413930x0, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:13:18,724 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41393-0x1004ec900550000 connected 2024-12-01T18:13:18,753 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:13:18,756 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:13:18,759 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:13:18,763 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41393 2024-12-01T18:13:18,764 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41393 2024-12-01T18:13:18,764 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41393 2024-12-01T18:13:18,765 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41393 2024-12-01T18:13:18,766 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41393 2024-12-01T18:13:18,773 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12, hbase.cluster.distributed=false 2024-12-01T18:13:18,836 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:13:18,836 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:13:18,836 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:13:18,837 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:13:18,837 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:13:18,837 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:13:18,839 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:13:18,842 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:13:18,842 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45577 2024-12-01T18:13:18,844 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:13:18,850 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:13:18,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:13:18,855 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:13:18,860 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45577 connecting to ZooKeeper ensemble=127.0.0.1:56284 2024-12-01T18:13:18,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455770x0, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:13:18,867 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45577-0x1004ec900550001 connected 2024-12-01T18:13:18,867 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:13:18,869 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:13:18,870 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:13:18,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45577 2024-12-01T18:13:18,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45577 2024-12-01T18:13:18,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45577 2024-12-01T18:13:18,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45577 2024-12-01T18:13:18,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45577 2024-12-01T18:13:18,882 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b8365d49b74c,41393,1733076798101 2024-12-01T18:13:18,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:13:18,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:13:18,891 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b8365d49b74c,41393,1733076798101 2024-12-01T18:13:18,898 DEBUG [M:0;b8365d49b74c:41393 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b8365d49b74c:41393 2024-12-01T18:13:18,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:13:18,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/master 2024-12-01T18:13:18,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:18,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:18,913 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:13:18,914 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b8365d49b74c,41393,1733076798101 from backup master directory 2024-12-01T18:13:18,914 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:13:18,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b8365d49b74c,41393,1733076798101 2024-12-01T18:13:18,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:13:18,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:13:18,918 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T18:13:18,918 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b8365d49b74c,41393,1733076798101 2024-12-01T18:13:18,921 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-01T18:13:18,922 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-01T18:13:19,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:13:19,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:13:19,005 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/hbase.id with ID: b9c393c3-c84a-4da9-8780-bffd4e6fcce4 2024-12-01T18:13:19,051 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:13:19,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:19,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:19,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:13:19,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:13:19,119 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:13:19,121 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T18:13:19,129 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:13:19,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:13:19,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:13:19,190 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store 2024-12-01T18:13:19,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:13:19,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:13:19,212 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-01T18:13:19,212 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:13:19,213 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:13:19,213 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:13:19,214 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:13:19,214 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:13:19,214 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:13:19,214 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:13:19,214 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:13:19,216 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/.initializing 2024-12-01T18:13:19,216 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/WALs/b8365d49b74c,41393,1733076798101 2024-12-01T18:13:19,235 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C41393%2C1733076798101, suffix=, logDir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/WALs/b8365d49b74c,41393,1733076798101, archiveDir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/oldWALs, maxLogs=10 2024-12-01T18:13:19,247 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41393%2C1733076798101.1733076799243 2024-12-01T18:13:19,248 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(752): Using builder API via reflection for DFS file creation replicate flag. 2024-12-01T18:13:19,248 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(762): Using builder API via reflection for DFS file creation noLocalWrite flag. 
2024-12-01T18:13:19,267 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/WALs/b8365d49b74c,41393,1733076798101/b8365d49b74c%2C41393%2C1733076798101.1733076799243 2024-12-01T18:13:19,275 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:33153:33153)] 2024-12-01T18:13:19,276 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:13:19,276 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:13:19,279 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:13:19,280 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:13:19,318 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:13:19,342 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T18:13:19,346 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:19,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:13:19,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:13:19,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T18:13:19,354 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:19,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:13:19,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:13:19,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T18:13:19,359 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:19,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:13:19,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:13:19,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T18:13:19,363 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:19,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:13:19,368 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:13:19,369 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:13:19,379 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
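The FlushLargeStoresPolicy line above falls back to memstore-flush-size divided by the number of families because the table descriptor does not set hbase.hregion.percolumnfamilyflush.size.lower.bound: with flushSize=134217728 (128 MB) and the four families info/proc/rs/state, that gives 33554432 bytes (32 MB), matching the flushSizeLowerBound reported a few records below. A hedged sketch of setting the bound explicitly on a hypothetical table "demo" follows (the 16 MB value is an arbitrary example):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBound {
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                // Explicit lower bound (16 MB here, an arbitrary example) instead of the
                // fallback of memstore-flush-size / number-of-families described above.
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(16L * 1024 * 1024))
                .build();
        }
    }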
2024-12-01T18:13:19,384 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:13:19,389 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:13:19,390 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845079, jitterRate=0.07457397878170013}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:13:19,394 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:13:19,395 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T18:13:19,425 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2424d34e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:13:19,459 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-01T18:13:19,470 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T18:13:19,470 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T18:13:19,473 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T18:13:19,474 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-01T18:13:19,479 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-01T18:13:19,479 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T18:13:19,505 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
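The ProcedureExecutor above starts 5 core workers with a burst limit of 50. A sketch of raising the core count follows; the key name hbase.master.procedure.threads is an assumption, since the log reports only the resulting counts, and 8 is an arbitrary example value:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ProcedureWorkerTuning {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key; the log prints only the resulting worker counts.
            conf.setInt("hbase.master.procedure.threads", 8); // arbitrary example value
            return conf;
        }
    }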
2024-12-01T18:13:19,517 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T18:13:19,519 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-01T18:13:19,521 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T18:13:19,522 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T18:13:19,524 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-01T18:13:19,526 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T18:13:19,529 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T18:13:19,531 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-01T18:13:19,532 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T18:13:19,534 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T18:13:19,543 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T18:13:19,544 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T18:13:19,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:13:19,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:13:19,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:19,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-01T18:13:19,549 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b8365d49b74c,41393,1733076798101, sessionid=0x1004ec900550000, setting cluster-up flag (Was=false) 2024-12-01T18:13:19,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:19,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:19,567 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T18:13:19,569 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,41393,1733076798101 2024-12-01T18:13:19,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:19,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:19,578 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T18:13:19,580 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,41393,1733076798101 2024-12-01T18:13:19,662 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-01T18:13:19,669 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-01T18:13:19,672 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
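The StochasticLoadBalancer record above reports maxSteps=1000000, stepsPerRegion=800 and maxRunningTime=30000. A sketch of the corresponding configuration follows; the hbase.master.balancer.stochastic.* key names are assumed, as the log prints only the loaded values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuning {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key names mirroring the maxSteps / stepsPerRegion / maxRunningTime values above.
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30000); // milliseconds
            return conf;
        }
    }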
2024-12-01T18:13:19,677 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b8365d49b74c,41393,1733076798101 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T18:13:19,681 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:13:19,681 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:13:19,681 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:13:19,681 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:13:19,682 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b8365d49b74c:0, corePoolSize=10, maxPoolSize=10 2024-12-01T18:13:19,682 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,682 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:13:19,682 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,689 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:13:19,690 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-01T18:13:19,691 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733076829691 2024-12-01T18:13:19,693 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T18:13:19,694 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T18:13:19,695 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:19,695 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:13:19,696 DEBUG [RS:0;b8365d49b74c:45577 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b8365d49b74c:45577 2024-12-01T18:13:19,697 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1008): ClusterId : b9c393c3-c84a-4da9-8780-bffd4e6fcce4 2024-12-01T18:13:19,698 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T18:13:19,698 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T18:13:19,699 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T18:13:19,699 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T18:13:19,701 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:13:19,703 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
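The hbase:meta descriptor above gives its 'info' family VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1 and an 8 KB block size. For reference, a sketch of declaring an equivalent family through the client API on a hypothetical user table "demo"; this is not the code HBase itself uses to build meta:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamily {
        public static TableDescriptor build() {
            // Mirrors the 'info' family attributes printed above, on a hypothetical user table.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8192)
                .build();
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(info)
                .build();
        }
    }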
2024-12-01T18:13:19,704 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T18:13:19,705 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T18:13:19,706 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T18:13:19,706 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:13:19,706 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:13:19,708 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T18:13:19,708 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T18:13:19,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:13:19,710 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:13:19,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:13:19,710 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733076799710,5,FailOnTimeoutGroup] 2024-12-01T18:13:19,710 DEBUG [RS:0;b8365d49b74c:45577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71a29e92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:13:19,711 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733076799710,5,FailOnTimeoutGroup] 2024-12-01T18:13:19,711 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:19,711 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T18:13:19,712 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:19,712 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-01T18:13:19,713 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
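The HMaster record above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a threshold greater than 0. A minimal sketch; the threshold 300 is an arbitrary example, not a recommendation from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountRecovery {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Any threshold > 0 enables the feature, per the log message above; 300 is an arbitrary example.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 300);
            return conf;
        }
    }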
2024-12-01T18:13:19,713 DEBUG [RS:0;b8365d49b74c:45577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78e6fd31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:13:19,713 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12 2024-12-01T18:13:19,717 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-01T18:13:19,717 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-01T18:13:19,717 DEBUG [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-01T18:13:19,719 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(3073): reportForDuty to master=b8365d49b74c,41393,1733076798101 with isa=b8365d49b74c/172.17.0.2:45577, startcode=1733076798835 2024-12-01T18:13:19,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:13:19,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:13:19,726 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:13:19,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:13:19,731 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:13:19,731 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:19,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:13:19,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:13:19,734 DEBUG [RS:0;b8365d49b74c:45577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:13:19,735 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, 
region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:13:19,735 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:19,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:13:19,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:13:19,739 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:13:19,739 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:19,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:13:19,742 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740 2024-12-01T18:13:19,743 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740 2024-12-01T18:13:19,747 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-01T18:13:19,750 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:13:19,754 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:13:19,755 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778769, jitterRate=-0.00974525511264801}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:13:19,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:13:19,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:13:19,758 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:13:19,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:13:19,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:13:19,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:13:19,760 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-01T18:13:19,760 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:13:19,763 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:13:19,763 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-01T18:13:19,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T18:13:19,782 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T18:13:19,785 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T18:13:19,804 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45837, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:13:19,810 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41393 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b8365d49b74c,45577,1733076798835 2024-12-01T18:13:19,812 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41393 {}] master.ServerManager(486): Registering regionserver=b8365d49b74c,45577,1733076798835 2024-12-01T18:13:19,826 DEBUG [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1725): Config from master: 
hbase.rootdir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12 2024-12-01T18:13:19,827 DEBUG [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:39817 2024-12-01T18:13:19,827 DEBUG [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-01T18:13:19,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:13:19,832 DEBUG [RS:0;b8365d49b74c:45577 {}] zookeeper.ZKUtil(111): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b8365d49b74c,45577,1733076798835 2024-12-01T18:13:19,832 WARN [RS:0;b8365d49b74c:45577 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T18:13:19,832 INFO [RS:0;b8365d49b74c:45577 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:13:19,832 DEBUG [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835 2024-12-01T18:13:19,834 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b8365d49b74c,45577,1733076798835] 2024-12-01T18:13:19,847 DEBUG [RS:0;b8365d49b74c:45577 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-01T18:13:19,859 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:13:19,873 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:13:19,876 INFO [RS:0;b8365d49b74c:45577 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:13:19,876 INFO [RS:0;b8365d49b74c:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:19,877 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-01T18:13:19,884 INFO [RS:0;b8365d49b74c:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
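The regionserver above receives hbase.rootdir and fs.defaultFS from the master, while clients reach the cluster through the ZooKeeper quorum logged earlier (127.0.0.1:56284). A sketch of a client connection against this mini-cluster follows; the quorum address and port are copied from the log, everything else (class name, the getClusterMetrics call) is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClient {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.setInt("hbase.zookeeper.property.clientPort", 56284); // from quorum=127.0.0.1:56284 above
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
            }
        }
    }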
2024-12-01T18:13:19,884 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,884 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,884 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,885 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,885 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,885 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:13:19,885 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,885 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,885 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,885 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,885 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:13:19,886 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:13:19,886 DEBUG [RS:0;b8365d49b74c:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:13:19,886 INFO [RS:0;b8365d49b74c:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:19,886 INFO [RS:0;b8365d49b74c:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:19,887 INFO [RS:0;b8365d49b74c:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:19,887 INFO [RS:0;b8365d49b74c:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:19,887 INFO [RS:0;b8365d49b74c:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,45577,1733076798835-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-01T18:13:19,906 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:13:19,908 INFO [RS:0;b8365d49b74c:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,45577,1733076798835-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:19,932 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.Replication(204): b8365d49b74c,45577,1733076798835 started 2024-12-01T18:13:19,932 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1767): Serving as b8365d49b74c,45577,1733076798835, RpcServer on b8365d49b74c/172.17.0.2:45577, sessionid=0x1004ec900550001 2024-12-01T18:13:19,933 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:13:19,933 DEBUG [RS:0;b8365d49b74c:45577 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b8365d49b74c,45577,1733076798835 2024-12-01T18:13:19,933 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,45577,1733076798835' 2024-12-01T18:13:19,933 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:13:19,935 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:13:19,935 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:13:19,935 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:13:19,936 DEBUG [RS:0;b8365d49b74c:45577 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b8365d49b74c,45577,1733076798835 2024-12-01T18:13:19,936 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,45577,1733076798835' 2024-12-01T18:13:19,936 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:13:19,936 WARN [b8365d49b74c:41393 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-12-01T18:13:19,936 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:13:19,937 DEBUG [RS:0;b8365d49b74c:45577 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:13:19,937 INFO [RS:0;b8365d49b74c:45577 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:13:19,937 INFO [RS:0;b8365d49b74c:45577 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
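Both quota managers above report "Quota support disabled". A sketch of switching quotas on follows; the key name hbase.quota.enabled is assumed, as the log does not print it:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitch {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key; the log only states that quota support is disabled.
            conf.setBoolean("hbase.quota.enabled", true);
            return conf;
        }
    }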
2024-12-01T18:13:20,046 INFO [RS:0;b8365d49b74c:45577 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C45577%2C1733076798835, suffix=, logDir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835, archiveDir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/oldWALs, maxLogs=32 2024-12-01T18:13:20,049 INFO [RS:0;b8365d49b74c:45577 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C45577%2C1733076798835.1733076800049 2024-12-01T18:13:20,058 INFO [RS:0;b8365d49b74c:45577 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076800049 2024-12-01T18:13:20,058 DEBUG [RS:0;b8365d49b74c:45577 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:33153:33153)] 2024-12-01T18:13:20,187 DEBUG [b8365d49b74c:41393 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-01T18:13:20,192 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b8365d49b74c,45577,1733076798835 2024-12-01T18:13:20,197 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,45577,1733076798835, state=OPENING 2024-12-01T18:13:20,202 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T18:13:20,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:20,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:20,204 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:13:20,204 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:13:20,206 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b8365d49b74c,45577,1733076798835}] 2024-12-01T18:13:20,381 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b8365d49b74c,45577,1733076798835 2024-12-01T18:13:20,382 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T18:13:20,385 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37668, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T18:13:20,396 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-01T18:13:20,397 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:13:20,401 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C45577%2C1733076798835.meta, suffix=.meta, logDir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835, archiveDir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/oldWALs, maxLogs=32 2024-12-01T18:13:20,404 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C45577%2C1733076798835.meta.1733076800404.meta 2024-12-01T18:13:20,413 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.meta.1733076800404.meta 2024-12-01T18:13:20,414 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:33153:33153)] 2024-12-01T18:13:20,414 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:13:20,415 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T18:13:20,476 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T18:13:20,481 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
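The meta region above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from its table descriptor with priority 536870911. For comparison, a sketch of attaching the same coprocessor class to a hypothetical user table "demo" through the client API:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorOnTable {
        public static TableDescriptor build() throws IOException {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                // Class name taken from the log above; the table itself is hypothetical.
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();
        }
    }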
2024-12-01T18:13:20,486 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T18:13:20,487 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:13:20,487 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-01T18:13:20,487 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-01T18:13:20,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:13:20,493 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:13:20,493 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:20,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:13:20,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:13:20,495 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:13:20,495 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:20,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:13:20,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:13:20,498 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:13:20,498 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:20,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:13:20,501 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740 2024-12-01T18:13:20,503 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740 2024-12-01T18:13:20,506 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
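
Note: the FlushLargeStoresPolicy entry above spells out its fallback rule: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the per-family lower bound becomes the region's memstore flush heap size divided by the number of column families (reported as 16.0 M here, and as flushSizeLowerBound=16777216 in the next entry). A minimal sketch of that rule, using illustrative names rather than HBase internals:

    // Illustrative sketch of the fallback described in the log entry above;
    // class and method names are not HBase internals.
    public final class FlushLowerBoundSketch {
      static long perFamilyFlushLowerBound(long memStoreFlushHeapSize, int numFamilies) {
        // The log reports 16.0 M (flushSizeLowerBound=16777216) for hbase:meta.
        return memStoreFlushHeapSize / numFamilies;
      }
    }
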
2024-12-01T18:13:20,509 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:13:20,510 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824489, jitterRate=0.04839213192462921}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:13:20,512 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:13:20,519 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733076800374 2024-12-01T18:13:20,532 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,45577,1733076798835 2024-12-01T18:13:20,532 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T18:13:20,532 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-01T18:13:20,533 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,45577,1733076798835, state=OPEN 2024-12-01T18:13:20,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:13:20,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:13:20,538 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:13:20,538 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:13:20,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T18:13:20,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=b8365d49b74c,45577,1733076798835 in 332 msec 2024-12-01T18:13:20,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T18:13:20,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 774 msec 2024-12-01T18:13:20,553 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 937 msec 2024-12-01T18:13:20,553 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733076800553, completionTime=-1 2024-12-01T18:13:20,554 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-01T18:13:20,554 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-01T18:13:20,598 DEBUG [hconnection-0x2cbba017-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:13:20,600 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37674, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:13:20,610 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-01T18:13:20,610 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733076860610 2024-12-01T18:13:20,610 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733076920610 2024-12-01T18:13:20,610 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 56 msec 2024-12-01T18:13:20,631 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41393,1733076798101-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:20,631 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41393,1733076798101-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:20,631 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41393,1733076798101-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:20,632 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b8365d49b74c:41393, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:20,633 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T18:13:20,638 DEBUG [master/b8365d49b74c:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-01T18:13:20,641 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-01T18:13:20,642 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:13:20,648 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-01T18:13:20,651 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:13:20,652 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:20,654 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:13:20,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:13:20,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:13:20,669 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3aeff3c0b3cd4a8b1d80782b2a0da031, NAME => 'hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12 2024-12-01T18:13:20,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:13:20,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:13:20,683 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:13:20,683 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 3aeff3c0b3cd4a8b1d80782b2a0da031, disabling compactions & flushes 2024-12-01T18:13:20,683 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 
2024-12-01T18:13:20,683 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 2024-12-01T18:13:20,683 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. after waiting 0 ms 2024-12-01T18:13:20,683 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 2024-12-01T18:13:20,683 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 2024-12-01T18:13:20,683 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3aeff3c0b3cd4a8b1d80782b2a0da031: 2024-12-01T18:13:20,685 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:13:20,691 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733076800686"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733076800686"}]},"ts":"1733076800686"} 2024-12-01T18:13:20,716 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-01T18:13:20,718 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:13:20,722 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076800718"}]},"ts":"1733076800718"} 2024-12-01T18:13:20,726 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-01T18:13:20,732 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3aeff3c0b3cd4a8b1d80782b2a0da031, ASSIGN}] 2024-12-01T18:13:20,735 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3aeff3c0b3cd4a8b1d80782b2a0da031, ASSIGN 2024-12-01T18:13:20,736 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=3aeff3c0b3cd4a8b1d80782b2a0da031, ASSIGN; state=OFFLINE, location=b8365d49b74c,45577,1733076798835; forceNewPlan=false, retain=false 2024-12-01T18:13:20,887 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3aeff3c0b3cd4a8b1d80782b2a0da031, regionState=OPENING, regionLocation=b8365d49b74c,45577,1733076798835 2024-12-01T18:13:20,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; 
OpenRegionProcedure 3aeff3c0b3cd4a8b1d80782b2a0da031, server=b8365d49b74c,45577,1733076798835}] 2024-12-01T18:13:21,046 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b8365d49b74c,45577,1733076798835 2024-12-01T18:13:21,052 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 2024-12-01T18:13:21,053 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 3aeff3c0b3cd4a8b1d80782b2a0da031, NAME => 'hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:13:21,053 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 3aeff3c0b3cd4a8b1d80782b2a0da031 2024-12-01T18:13:21,053 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:13:21,054 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 3aeff3c0b3cd4a8b1d80782b2a0da031 2024-12-01T18:13:21,054 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 3aeff3c0b3cd4a8b1d80782b2a0da031 2024-12-01T18:13:21,056 INFO [StoreOpener-3aeff3c0b3cd4a8b1d80782b2a0da031-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3aeff3c0b3cd4a8b1d80782b2a0da031 2024-12-01T18:13:21,059 INFO [StoreOpener-3aeff3c0b3cd4a8b1d80782b2a0da031-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3aeff3c0b3cd4a8b1d80782b2a0da031 columnFamilyName info 2024-12-01T18:13:21,059 DEBUG [StoreOpener-3aeff3c0b3cd4a8b1d80782b2a0da031-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:21,060 INFO [StoreOpener-3aeff3c0b3cd4a8b1d80782b2a0da031-1 {}] regionserver.HStore(327): Store=3aeff3c0b3cd4a8b1d80782b2a0da031/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, 
compression=NONE 2024-12-01T18:13:21,061 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/namespace/3aeff3c0b3cd4a8b1d80782b2a0da031 2024-12-01T18:13:21,062 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/namespace/3aeff3c0b3cd4a8b1d80782b2a0da031 2024-12-01T18:13:21,065 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 3aeff3c0b3cd4a8b1d80782b2a0da031 2024-12-01T18:13:21,069 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/namespace/3aeff3c0b3cd4a8b1d80782b2a0da031/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:13:21,070 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 3aeff3c0b3cd4a8b1d80782b2a0da031; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848047, jitterRate=0.0783475935459137}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:13:21,071 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 3aeff3c0b3cd4a8b1d80782b2a0da031: 2024-12-01T18:13:21,073 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031., pid=6, masterSystemTime=1733076801046 2024-12-01T18:13:21,077 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 2024-12-01T18:13:21,077 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 
2024-12-01T18:13:21,078 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3aeff3c0b3cd4a8b1d80782b2a0da031, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,45577,1733076798835 2024-12-01T18:13:21,086 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T18:13:21,086 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 3aeff3c0b3cd4a8b1d80782b2a0da031, server=b8365d49b74c,45577,1733076798835 in 190 msec 2024-12-01T18:13:21,089 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T18:13:21,090 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=3aeff3c0b3cd4a8b1d80782b2a0da031, ASSIGN in 354 msec 2024-12-01T18:13:21,091 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:13:21,091 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076801091"}]},"ts":"1733076801091"} 2024-12-01T18:13:21,094 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-01T18:13:21,098 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:13:21,101 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 456 msec 2024-12-01T18:13:21,151 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-01T18:13:21,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:13:21,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:21,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:13:21,183 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-01T18:13:21,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:13:21,205 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 24 msec 2024-12-01T18:13:21,218 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-01T18:13:21,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:13:21,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-12-01T18:13:21,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-01T18:13:21,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-01T18:13:21,247 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.328sec 2024-12-01T18:13:21,248 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T18:13:21,249 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T18:13:21,250 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T18:13:21,251 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-01T18:13:21,251 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T18:13:21,252 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41393,1733076798101-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:13:21,252 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41393,1733076798101-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T18:13:21,259 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-01T18:13:21,259 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T18:13:21,260 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41393,1733076798101-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
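
Note: the two CreateNamespaceProcedure entries above are the master bootstrapping its built-in namespaces (default and hbase). A user-defined namespace goes through the same procedure when created through the client Admin API; a minimal sketch, where the namespace name "testns" is illustrative and not taken from this log:

    // Sketch: creating a namespace through the client Admin API.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createNamespace(NamespaceDescriptor.create("testns").build());  // namespace name is illustrative
        }
      }
    }
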
2024-12-01T18:13:21,298 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x50389d05 to 127.0.0.1:56284 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c4612d8 2024-12-01T18:13:21,298 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-01T18:13:21,305 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b719b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:13:21,307 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-01T18:13:21,307 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-01T18:13:21,317 DEBUG [hconnection-0x4e9b63cb-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:13:21,352 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37684, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:13:21,362 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b8365d49b74c,41393,1733076798101 2024-12-01T18:13:21,363 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:13:21,370 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-01T18:13:21,376 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T18:13:21,380 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59404, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T18:13:21,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41393 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-01T18:13:21,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41393 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
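
Note: at this point the minicluster is up and the balancer is switched off before the test table is created; the two TableDescriptorChecker warnings indicate the table descriptor deliberately uses a tiny region max file size (786432) and memstore flush size (8192) so flushes and rolls happen quickly (see the descriptor sketch after the create entry below). A hedged sketch of the corresponding test-harness calls; the exact test code is an assumption:

    // Sketch under assumptions: the kind of test-harness calls that would produce
    // the "Minicluster is up" and "set balanceSwitch=false" entries above.
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.Admin;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(1);                 // one master, one regionserver, as in this log
        try (Admin admin = util.getAdmin()) {
          admin.balancerSwitch(false, true);      // matches the "set balanceSwitch=false" entry
        }
        // util.shutdownMiniCluster() would tear the cluster down at the end of the test.
      }
    }
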
2024-12-01T18:13:21,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41393 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:13:21,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41393 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-01T18:13:21,394 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:13:21,395 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:21,396 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:13:21,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41393 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 9 2024-12-01T18:13:21,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41393 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-01T18:13:21,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741837_1013 (size=389) 2024-12-01T18:13:21,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741837_1013 (size=389) 2024-12-01T18:13:21,411 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5f48298de27791dfc9807e2f795ef8cf, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12 2024-12-01T18:13:21,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741838_1014 (size=72) 2024-12-01T18:13:21,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741838_1014 (size=72) 2024-12-01T18:13:21,424 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 
{}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:13:21,424 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 5f48298de27791dfc9807e2f795ef8cf, disabling compactions & flushes 2024-12-01T18:13:21,424 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 2024-12-01T18:13:21,424 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 2024-12-01T18:13:21,424 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. after waiting 0 ms 2024-12-01T18:13:21,424 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 2024-12-01T18:13:21,424 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 2024-12-01T18:13:21,425 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5f48298de27791dfc9807e2f795ef8cf: 2024-12-01T18:13:21,427 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:13:21,427 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733076801427"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733076801427"}]},"ts":"1733076801427"} 2024-12-01T18:13:21,430 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
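
Note: the create request above ({NAME => 'info', VERSIONS => '1', BLOCKSIZE => '65536 B (64KB)', ...}) maps onto a straightforward client-side table descriptor. A minimal sketch follows; the setMaxFileSize/setMemStoreFlushSize values are assumptions chosen to match the 786432 and 8192 figures in the earlier TableDescriptorChecker warnings.

    // Sketch: a descriptor equivalent to the create statement in this log.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TestTableDescriptorSketch {
      static TableDescriptor descriptor() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setMaxFileSize(786432L)        // assumption: explains the MAX_FILESIZE warning
            .setMemStoreFlushSize(8192L)    // assumption: explains the MEMSTORE_FLUSHSIZE warning
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)          // VERSIONS => '1'
                .setBlocksize(65536)        // BLOCKSIZE => '65536 B (64KB)'
                .build())
            .build();
      }

      static void create(Admin admin) throws java.io.IOException {
        admin.createTable(descriptor());    // shows up as CreateTableProcedure pid=9 in this log
      }
    }
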
2024-12-01T18:13:21,432 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:13:21,432 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076801432"}]},"ts":"1733076801432"} 2024-12-01T18:13:21,434 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-01T18:13:21,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5f48298de27791dfc9807e2f795ef8cf, ASSIGN}] 2024-12-01T18:13:21,441 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5f48298de27791dfc9807e2f795ef8cf, ASSIGN 2024-12-01T18:13:21,443 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5f48298de27791dfc9807e2f795ef8cf, ASSIGN; state=OFFLINE, location=b8365d49b74c,45577,1733076798835; forceNewPlan=false, retain=false 2024-12-01T18:13:21,593 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=5f48298de27791dfc9807e2f795ef8cf, regionState=OPENING, regionLocation=b8365d49b74c,45577,1733076798835 2024-12-01T18:13:21,597 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 5f48298de27791dfc9807e2f795ef8cf, server=b8365d49b74c,45577,1733076798835}] 2024-12-01T18:13:21,751 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b8365d49b74c,45577,1733076798835 2024-12-01T18:13:21,758 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 
2024-12-01T18:13:21,758 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 5f48298de27791dfc9807e2f795ef8cf, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:13:21,759 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:13:21,759 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:13:21,759 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:13:21,759 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:13:21,761 INFO [StoreOpener-5f48298de27791dfc9807e2f795ef8cf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:13:21,764 INFO [StoreOpener-5f48298de27791dfc9807e2f795ef8cf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5f48298de27791dfc9807e2f795ef8cf columnFamilyName info 2024-12-01T18:13:21,764 DEBUG [StoreOpener-5f48298de27791dfc9807e2f795ef8cf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:13:21,765 INFO [StoreOpener-5f48298de27791dfc9807e2f795ef8cf-1 {}] regionserver.HStore(327): Store=5f48298de27791dfc9807e2f795ef8cf/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:13:21,767 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:13:21,767 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:13:21,772 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:13:21,775 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:13:21,777 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 5f48298de27791dfc9807e2f795ef8cf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835315, jitterRate=0.06215885281562805}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:13:21,778 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 5f48298de27791dfc9807e2f795ef8cf: 2024-12-01T18:13:21,780 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf., pid=11, masterSystemTime=1733076801751 2024-12-01T18:13:21,783 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 2024-12-01T18:13:21,783 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 
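
Note: once the region is opened, the meta update that follows records its location (regionState=OPEN, regionLocation=b8365d49b74c,45577,...). From the client side that location can be resolved through a RegionLocator; a minimal sketch, assuming an already-open Connection:

    // Sketch, assuming an open Connection: resolve where the single region of the
    // test table is hosted, which is what the hbase:meta update below records.
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLocationSketch {
      static void printLocation(Connection conn) throws java.io.IOException {
        TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true); // reload from meta
          System.out.println(loc.getRegion().getRegionNameAsString() + " @ " + loc.getServerName());
        }
      }
    }
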
2024-12-01T18:13:21,784 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=5f48298de27791dfc9807e2f795ef8cf, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,45577,1733076798835 2024-12-01T18:13:21,791 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-01T18:13:21,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 5f48298de27791dfc9807e2f795ef8cf, server=b8365d49b74c,45577,1733076798835 in 190 msec 2024-12-01T18:13:21,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-01T18:13:21,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5f48298de27791dfc9807e2f795ef8cf, ASSIGN in 352 msec 2024-12-01T18:13:21,796 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:13:21,796 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076801796"}]},"ts":"1733076801796"} 2024-12-01T18:13:21,799 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-01T18:13:21,802 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:13:21,804 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 412 msec 2024-12-01T18:13:25,991 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-01T18:13:26,041 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-01T18:13:26,043 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-01T18:13:26,044 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-01T18:13:28,602 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-01T18:13:28,602 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-01T18:13:28,604 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-01T18:13:28,604 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-01T18:13:28,606 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-01T18:13:28,606 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-01T18:13:28,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T18:13:28,607 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-01T18:13:28,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-01T18:13:28,608 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-01T18:13:31,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41393 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-01T18:13:31,409 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling, procId: 9 completed 2024-12-01T18:13:31,413 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-01T18:13:31,414 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 
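
Note: the flush entries that follow ("key is row0001/info:/...", biggest cell around 1 KB, dataSize=7.36 KB over 7 entries) imply the test writes a handful of roughly 1 KB values into the 'info' family so the tiny memstore flush size is exceeded. A hedged sketch of equivalent client writes; row naming and payload size are inferred from the log, not copied from the test source:

    // Sketch inferred from the flush entries below: write ~1 KB cells into 'info'.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteRowsSketch {
      static void writeBatch(Connection conn, int start, int count) throws java.io.IOException {
        byte[] family = Bytes.toBytes("info");
        byte[] value = new byte[1024];                         // ~1 KB payload per cell (assumption)
        try (Table table = conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
          for (int i = start; i < start + count; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));  // row0001, row0002, ...
            put.addColumn(family, Bytes.toBytes(""), value);   // empty qualifier, as in "info:/" above
            table.put(put);
          }
        }
      }
    }
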
2024-12-01T18:13:31,415 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C45577%2C1733076798835.1733076811414 2024-12-01T18:13:31,426 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076800049 with entries=4, filesize=947 B; new WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076811414 2024-12-01T18:13:31,427 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33153:33153),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-01T18:13:31,427 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076800049 is not closed yet, will try archiving it next time 2024-12-01T18:13:31,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741833_1009 (size=955) 2024-12-01T18:13:31,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741833_1009 (size=955) 2024-12-01T18:13:43,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45577 {}] regionserver.HRegion(8581): Flush requested on 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:13:43,457 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f48298de27791dfc9807e2f795ef8cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:13:43,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/4b68f3c6fbf7402195bfa0975885daaa is 1080, key is row0001/info:/1733076811433/Put/seqid=0 2024-12-01T18:13:43,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741840_1016 (size=12509) 2024-12-01T18:13:43,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741840_1016 (size=12509) 2024-12-01T18:13:43,531 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/4b68f3c6fbf7402195bfa0975885daaa 2024-12-01T18:13:43,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/4b68f3c6fbf7402195bfa0975885daaa as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/4b68f3c6fbf7402195bfa0975885daaa 2024-12-01T18:13:43,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/4b68f3c6fbf7402195bfa0975885daaa, entries=7, sequenceid=11, filesize=12.2 K 2024-12-01T18:13:43,595 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5f48298de27791dfc9807e2f795ef8cf in 138ms, sequenceid=11, compaction requested=false 2024-12-01T18:13:43,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f48298de27791dfc9807e2f795ef8cf: 2024-12-01T18:13:47,229 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T18:13:50,714 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:13:50,717 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33292, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:13:51,467 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C45577%2C1733076798835.1733076831467 2024-12-01T18:13:51,677 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:13:51,679 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076811414 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076831467 2024-12-01T18:13:51,679 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33153:33153),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-01T18:13:51,679 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076811414 is not closed yet, will try archiving it next time 2024-12-01T18:13:51,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741839_1015 (size=12399) 2024-12-01T18:13:51,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741839_1015 (size=12399) 2024-12-01T18:13:51,881 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:13:54,085 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], 
DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:13:56,288 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:13:58,491 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:13:58,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45577 {}] regionserver.HRegion(8581): Flush requested on 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:13:58,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f48298de27791dfc9807e2f795ef8cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:13:58,694 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:13:58,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/a7bde9b85c8c4eefa188e06b3c343ac8 is 1080, key is row0008/info:/1733076825458/Put/seqid=0 2024-12-01T18:13:58,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741842_1018 (size=12509) 2024-12-01T18:13:58,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741842_1018 (size=12509) 2024-12-01T18:13:58,707 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/a7bde9b85c8c4eefa188e06b3c343ac8 2024-12-01T18:13:58,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/a7bde9b85c8c4eefa188e06b3c343ac8 as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/a7bde9b85c8c4eefa188e06b3c343ac8 2024-12-01T18:13:58,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/a7bde9b85c8c4eefa188e06b3c343ac8, entries=7, sequenceid=21, filesize=12.2 K 2024-12-01T18:13:58,928 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 
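The flush above follows a write-then-commit pattern: the new HFile 4b68f3c6fbf7402195bfa0975885daaa is first written under the region's .tmp directory and only afterwards moved into the info store directory, so readers never observe a half-written file. Below is a minimal local-filesystem sketch of that pattern; the TmpThenCommit class, the flushToStore helper and the payload are invented for the illustration and are not HBase APIs.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Minimal sketch of the ".tmp then commit" pattern seen in the flush entries
// above: write the new file where readers cannot see it, then publish it with
// a single atomic rename.
public class TmpThenCommit {

    // Hypothetical helper: writes 'payload' as a new store file named
    // 'fileName' inside 'storeDir', going through 'tmpDir' first.
    static Path flushToStore(Path tmpDir, Path storeDir, String fileName, byte[] payload)
            throws IOException {
        Files.createDirectories(tmpDir);
        Files.createDirectories(storeDir);

        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, payload);                 // the slow part happens off to the side

        Path published = storeDir.resolve(fileName);
        // The commit is a rename; on a real cluster this is an HDFS rename,
        // here an atomic move on the local filesystem stands in for it.
        return Files.move(tmpFile, published, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path base = Files.createTempDirectory("flush-demo");
        Path committed = flushToStore(
                base.resolve(".tmp"), base.resolve("info"),
                "4b68f3c6fbf7402195bfa0975885daaa",
                "7 cells, sequenceid=11".getBytes(StandardCharsets.UTF_8));
        System.out.println("Committed store file: " + committed);
    }
}
```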
2024-12-01T18:13:58,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5f48298de27791dfc9807e2f795ef8cf in 436ms, sequenceid=21, compaction requested=false 2024-12-01T18:13:58,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f48298de27791dfc9807e2f795ef8cf: 2024-12-01T18:13:58,928 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=24.4 K, sizeToCheck=16.0 K 2024-12-01T18:13:58,928 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:13:58,929 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/4b68f3c6fbf7402195bfa0975885daaa because midkey is the same as first or last row 2024-12-01T18:14:00,695 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:01,764 INFO [master/b8365d49b74c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-01T18:14:01,764 INFO [master/b8365d49b74c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-01T18:14:02,898 WARN [sync.1 {}] wal.AbstractFSWAL(1346): Requesting log roll because we exceeded slow sync threshold; count=7, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:02,900 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C45577%2C1733076798835:(num 1733076831467) roll requested 2024-12-01T18:14:02,900 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:02,900 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C45577%2C1733076798835.1733076842900 2024-12-01T18:14:03,108 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:03,308 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:03,309 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076831467 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076842900 2024-12-01T18:14:03,309 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:33153:33153)] 2024-12-01T18:14:03,309 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076831467 is not closed yet, will try archiving it next time 2024-12-01T18:14:03,311 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076811414 to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/oldWALs/b8365d49b74c%2C45577%2C1733076798835.1733076811414 2024-12-01T18:14:03,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741841_1017 (size=7739) 2024-12-01T18:14:03,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741841_1017 (size=7739) 2024-12-01T18:14:05,102 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:06,759 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 5f48298de27791dfc9807e2f795ef8cf, had cached 0 bytes from a total of 25018 2024-12-01T18:14:07,305 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:09,508 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:11,711 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:13,714 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T18:14:13,714 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C45577%2C1733076798835.1733076853714 2024-12-01T18:14:17,229 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
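The split-policy entries above ("Should split because info size=24.4 K, sizeToCheck=16.0 K" followed by "cannot split ... because midkey is the same as first or last row") show two independent checks: the store is large enough to split, but the candidate split point is unusable because splitting at a key equal to the first or last row would leave one daughter region empty. The sketch below condenses that decision; the RegionSplitDecision class and its constants are illustrative, not HBase code.

```java
import java.util.Arrays;

// Condensed sketch of the two split checks visible in the log above:
// 1) is the store big enough to split, and 2) is the chosen midkey usable,
// i.e. strictly between the first and last row of the file.
public class RegionSplitDecision {

    static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes) {
        return storeSizeBytes > sizeToCheckBytes;
    }

    // A midkey equal to the first or last row cannot be used as a split point,
    // because one daughter region would end up empty.
    static boolean usableSplitPoint(byte[] firstRow, byte[] midKey, byte[] lastRow) {
        return !Arrays.equals(midKey, firstRow) && !Arrays.equals(midKey, lastRow);
    }

    public static void main(String[] args) {
        long storeSize   = 24 * 1024 + 410;   // roughly the 24.4 K reported in the log
        long sizeToCheck = 16 * 1024;         // the 16.0 K check size from the log

        byte[] first = "row0001".getBytes();
        byte[] last  = "row0021".getBytes();
        byte[] mid   = "row0001".getBytes();  // same as the first row, so it is rejected

        boolean sizeSaysSplit = shouldSplit(storeSize, sizeToCheck);
        boolean midKeyOk      = usableSplitPoint(first, mid, last);

        System.out.println("Should split by size: " + sizeSaysSplit);
        System.out.println("Split point usable:   " + midKeyOk);
        System.out.println("Split proceeds:       " + (sizeSaysSplit && midKeyOk));
    }
}
```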
2024-12-01T18:14:18,723 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:18,723 WARN [Time-limited test {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:18,723 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C45577%2C1733076798835:(num 1733076853714) roll requested 2024-12-01T18:14:20,651 DEBUG [master/b8365d49b74c:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 3aeff3c0b3cd4a8b1d80782b2a0da031 changed from -1.0 to 0.0, refreshing cache 2024-12-01T18:14:23,724 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:23,724 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:23,725 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076842900 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076853714 2024-12-01T18:14:23,725 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33153:33153),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-01T18:14:23,725 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076842900 is not closed yet, will try archiving it next time 2024-12-01T18:14:23,726 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C45577%2C1733076798835.1733076863725 2024-12-01T18:14:23,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741843_1019 (size=4753) 2024-12-01T18:14:23,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741843_1019 (size=4753) 2024-12-01T18:14:28,728 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:28,728 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, 
threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:28,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45577 {}] regionserver.HRegion(8581): Flush requested on 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:14:28,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f48298de27791dfc9807e2f795ef8cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:14:28,734 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:28,734 WARN [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:30,729 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T18:14:33,730 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:33,730 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:33,735 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:33,735 WARN [sync.0 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:33,736 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076853714 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076863725 2024-12-01T18:14:33,736 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:33153:33153)] 2024-12-01T18:14:33,736 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): 
hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076853714 is not closed yet, will try archiving it next time 2024-12-01T18:14:33,736 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C45577%2C1733076798835:(num 1733076863725) roll requested 2024-12-01T18:14:33,737 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C45577%2C1733076798835.1733076873736 2024-12-01T18:14:33,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/d09d328c5bfa417b8499756b7fbe1c6e is 1080, key is row0015/info:/1733076840494/Put/seqid=0 2024-12-01T18:14:33,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741844_1020 (size=1569) 2024-12-01T18:14:33,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741844_1020 (size=1569) 2024-12-01T18:14:33,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741846_1022 (size=12509) 2024-12-01T18:14:33,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741846_1022 (size=12509) 2024-12-01T18:14:33,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/d09d328c5bfa417b8499756b7fbe1c6e 2024-12-01T18:14:33,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/d09d328c5bfa417b8499756b7fbe1c6e as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/d09d328c5bfa417b8499756b7fbe1c6e 2024-12-01T18:14:33,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/d09d328c5bfa417b8499756b7fbe1c6e, entries=7, sequenceid=31, filesize=12.2 K 2024-12-01T18:14:38,759 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:38,759 WARN [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], 
DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:38,790 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:38,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5f48298de27791dfc9807e2f795ef8cf in 10062ms, sequenceid=31, compaction requested=true 2024-12-01T18:14:38,790 WARN [sync.1 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:38,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f48298de27791dfc9807e2f795ef8cf: 2024-12-01T18:14:38,790 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=36.6 K, sizeToCheck=16.0 K 2024-12-01T18:14:38,791 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:14:38,791 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/4b68f3c6fbf7402195bfa0975885daaa because midkey is the same as first or last row 2024-12-01T18:14:38,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5f48298de27791dfc9807e2f795ef8cf:info, priority=-2147483648, current under compaction store size is 1 2024-12-01T18:14:38,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:14:38,793 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-01T18:14:38,797 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-01T18:14:38,799 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.HStore(1540): 5f48298de27791dfc9807e2f795ef8cf/info is initiating minor compaction (all files) 2024-12-01T18:14:38,799 INFO [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5f48298de27791dfc9807e2f795ef8cf/info in TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 
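The selection entries above report that the exploring policy looked at the three flushed files (3 x 12509 bytes = 37527 bytes) and took all of them for a minor compaction. The usual ratio test behind that kind of selection is that every file in the candidate window must be no larger than the ratio times the combined size of the other files in the window. The sketch below applies that test to the three sizes from this log with an assumed ratio of 1.2; it is a simplification for illustration, not the ExploringCompactionPolicy implementation.

```java
import java.util.List;

// Simplified ratio test behind minor-compaction selection: every file in a
// candidate window must be <= ratio * (total size of the other files in the
// window), otherwise the window is rejected.
public class CompactionWindowCheck {

    static boolean windowPassesRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The three flushed HFiles from the log, 12509 bytes each (37527 total).
        List<Long> candidate = List.of(12509L, 12509L, 12509L);
        double assumedRatio = 1.2;   // assumed, default-style compaction ratio

        System.out.println("Window size: "
                + candidate.stream().mapToLong(Long::longValue).sum() + " bytes");
        System.out.println("Selected for compaction: "
                + windowPassesRatio(candidate, assumedRatio));
    }
}
```

With three equal 12509-byte files, each file is compared against 1.2 times the remaining 25018 bytes, so the whole window passes, which lines up with the "3 files of size 37527" line above.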
2024-12-01T18:14:38,799 INFO [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/4b68f3c6fbf7402195bfa0975885daaa, hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/a7bde9b85c8c4eefa188e06b3c343ac8, hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/d09d328c5bfa417b8499756b7fbe1c6e] into tmpdir=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp, totalSize=36.6 K 2024-12-01T18:14:38,801 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b68f3c6fbf7402195bfa0975885daaa, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733076811433 2024-12-01T18:14:38,802 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7bde9b85c8c4eefa188e06b3c343ac8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733076825458 2024-12-01T18:14:38,803 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] compactions.Compactor(224): Compacting d09d328c5bfa417b8499756b7fbe1c6e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733076840494 2024-12-01T18:14:38,834 INFO [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5f48298de27791dfc9807e2f795ef8cf#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-01T18:14:38,835 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/23063ee473d349ab9ee0f118a5abb353 is 1080, key is row0001/info:/1733076811433/Put/seqid=0 2024-12-01T18:14:38,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741848_1024 (size=27710) 2024-12-01T18:14:38,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741848_1024 (size=27710) 2024-12-01T18:14:38,861 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/23063ee473d349ab9ee0f118a5abb353 as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/23063ee473d349ab9ee0f118a5abb353 2024-12-01T18:14:43,760 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:43,760 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK]] 2024-12-01T18:14:43,761 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076863725 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076873736 2024-12-01T18:14:43,761 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33153:33153),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-01T18:14:43,761 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076863725 is not closed yet, will try archiving it next time 2024-12-01T18:14:43,761 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076831467 to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/oldWALs/b8365d49b74c%2C45577%2C1733076798835.1733076831467 2024-12-01T18:14:43,761 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C45577%2C1733076798835:(num 1733076883761) roll requested 
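The WARN entries in this part of the log show the two ways a slow WAL sync escalates into a roll request: a single sync at or above the time threshold (time=5006 ms against threshold=5000 ms), or too many merely-slow syncs accumulating (count=7 against threshold=5 earlier in the log). The sketch below models that double trigger with a sliding window; the SlowSyncRollTrigger name, the 100 ms "slow" cutoff, the window length and the reset-after-roll behaviour are assumptions for the illustration rather than the FSHLog implementation.

```java
import java.util.ArrayDeque;
import java.util.Deque;

// Sketch of the two slow-sync escalation paths visible in the log:
// a single sync at or above the "roll immediately" threshold, or too many
// moderately slow syncs inside a sliding time window.
public class SlowSyncRollTrigger {

    private final long slowSyncMs;        // a sync slower than this counts as "slow"
    private final long rollNowMs;         // a sync slower than this rolls at once
    private final int  slowCountThreshold;
    private final long windowMs;
    private final Deque<Long> slowSyncTimestamps = new ArrayDeque<>();

    SlowSyncRollTrigger(long slowSyncMs, long rollNowMs, int slowCountThreshold, long windowMs) {
        this.slowSyncMs = slowSyncMs;
        this.rollNowMs = rollNowMs;
        this.slowCountThreshold = slowCountThreshold;
        this.windowMs = windowMs;
    }

    /** Returns true if this sync should cause a log-roll request. */
    boolean recordSync(long nowMs, long syncCostMs) {
        if (syncCostMs >= rollNowMs) {
            slowSyncTimestamps.clear();            // roll right away, start fresh
            return true;
        }
        if (syncCostMs >= slowSyncMs) {
            slowSyncTimestamps.addLast(nowMs);
        }
        // Drop slow syncs that fell out of the sliding window.
        while (!slowSyncTimestamps.isEmpty()
                && nowMs - slowSyncTimestamps.peekFirst() > windowMs) {
            slowSyncTimestamps.removeFirst();
        }
        if (slowSyncTimestamps.size() > slowCountThreshold) {
            slowSyncTimestamps.clear();
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        // Assumed thresholds: 100 ms counts as slow (the log flags ~200 ms syncs),
        // 5000 ms rolls immediately, more than 5 slow syncs per minute also rolls.
        SlowSyncRollTrigger trigger = new SlowSyncRollTrigger(100, 5000, 5, 60_000);
        long t = 0;
        for (int i = 0; i < 7; i++) {              // seven ~200 ms syncs
            boolean roll = trigger.recordSync(t += 2_000, 201);
            System.out.println("sync " + (i + 1) + " (201 ms) -> roll=" + roll);
        }
        System.out.println("one 5006 ms sync -> roll="
                + trigger.recordSync(t + 2_000, 5006));
    }
}
```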
2024-12-01T18:14:43,761 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C45577%2C1733076798835.1733076883761 2024-12-01T18:14:43,763 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076842900 to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/oldWALs/b8365d49b74c%2C45577%2C1733076798835.1733076842900 2024-12-01T18:14:43,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741845_1021 (size=438) 2024-12-01T18:14:43,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741845_1021 (size=438) 2024-12-01T18:14:43,765 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076853714 to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/oldWALs/b8365d49b74c%2C45577%2C1733076798835.1733076853714 2024-12-01T18:14:43,767 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076863725 to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/oldWALs/b8365d49b74c%2C45577%2C1733076798835.1733076863725 2024-12-01T18:14:47,230 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T18:14:48,762 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:48,762 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:48,764 INFO [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5f48298de27791dfc9807e2f795ef8cf/info of 5f48298de27791dfc9807e2f795ef8cf into 23063ee473d349ab9ee0f118a5abb353(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 9sec to execute. 
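The throughput-controller entry above reports an average of 7.18 MB/second against a 50.00 MB/second limit, so the controller never had to sleep ("slept 0 time(s)"). The idea behind such a controller is to track the bytes an operation has written and sleep whenever the running rate gets ahead of the limit. The sketch below is a generic rate limiter in that spirit, scaled down so the demo finishes quickly; the class name and the control loop are assumptions, not the PressureAwareThroughputController API.

```java
// Generic sketch of a bytes-per-second write limiter in the spirit of the
// "total limit is 50.00 MB/second" controller entry above: if the bytes
// written so far run ahead of the allowed rate, sleep until they do not.
public class RateLimitedWriter {

    private final double bytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;
    private int sleeps = 0;

    RateLimitedWriter(double bytesPerSecond) {
        this.bytesPerSecond = bytesPerSecond;
    }

    /** Accounts for 'len' bytes and sleeps if the running rate is too high. */
    void control(long len) throws InterruptedException {
        bytesWritten += len;
        double elapsedSec = (System.nanoTime() - startNanos) / 1_000_000_000.0;
        double earliestAllowedSec = bytesWritten / bytesPerSecond;
        long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
            sleeps++;
            Thread.sleep(sleepMs);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // Pretend to "compact" 2 MB in 64 KB chunks under a 1 MB/s cap,
        // small numbers so the demo takes about two seconds.
        RateLimitedWriter limiter = new RateLimitedWriter(1024 * 1024);
        long start = System.nanoTime();
        for (int i = 0; i < 32; i++) {
            limiter.control(64 * 1024);
        }
        double secs = (System.nanoTime() - start) / 1_000_000_000.0;
        System.out.printf("wrote 2 MB in %.2f s, slept %d time(s)%n", secs, limiter.sleeps);
    }
}
```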
2024-12-01T18:14:48,764 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5f48298de27791dfc9807e2f795ef8cf: 2024-12-01T18:14:48,764 INFO [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf., storeName=5f48298de27791dfc9807e2f795ef8cf/info, priority=13, startTime=1733076878792; duration=9sec 2024-12-01T18:14:48,765 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=27.1 K, sizeToCheck=16.0 K 2024-12-01T18:14:48,765 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:14:48,765 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/23063ee473d349ab9ee0f118a5abb353 because midkey is the same as first or last row 2024-12-01T18:14:48,765 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:14:48,765 DEBUG [RS:0;b8365d49b74c:45577-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5f48298de27791dfc9807e2f795ef8cf:info 2024-12-01T18:14:48,770 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:48,770 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38223,DS-8282ea61-5623-4592-b32c-ccc6ebc71209,DISK], DatanodeInfoWithStorage[127.0.0.1:34749,DS-21d65a05-a647-4fad-a102-f910c8d693c8,DISK]] 2024-12-01T18:14:48,770 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076873736 with entries=1, filesize=531 B; new WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076883761 2024-12-01T18:14:48,771 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:33153:33153)] 2024-12-01T18:14:48,771 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076873736 is not closed yet, will try archiving it next time 2024-12-01T18:14:48,771 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C45577%2C1733076798835.1733076888771 2024-12-01T18:14:48,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741847_1023 (size=539) 
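Because this log repeats the same "Rolled WAL ... with entries=..., filesize=..." message many times, it can be easier to summarize the rolls than to read them one by one. The helper below scans a log on standard input with a regular expression keyed to the wording visible above and prints one line per roll plus totals; it is an ad-hoc reading aid, not part of HBase.

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Ad-hoc helper: summarize the "Rolled WAL ... with entries=..., filesize=..."
// messages from a log read on standard input.
public class WalRollSummary {

    private static final Pattern ROLLED = Pattern.compile(
            "Rolled WAL (\\S+) with entries=(\\d+), filesize=([\\d.]+ ?K?B)");

    public static void main(String[] args) throws IOException {
        BufferedReader in = new BufferedReader(
                new InputStreamReader(System.in, StandardCharsets.UTF_8));
        int rolls = 0;
        long totalEntries = 0;
        String line;
        while ((line = in.readLine()) != null) {
            Matcher m = ROLLED.matcher(line);
            while (m.find()) {                      // one physical line may hold several entries
                rolls++;
                totalEntries += Long.parseLong(m.group(2));
                System.out.printf("roll %d: %s entries=%s filesize=%s%n",
                        rolls, m.group(1), m.group(2), m.group(3));
            }
        }
        System.out.printf("total rolls=%d, total entries=%d%n", rolls, totalEntries);
    }
}
```

Since the helper works line by line, a roll message that happens to be wrapped across physical lines (as some are in this capture) will be missed; that is acceptable for a quick summary.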
2024-12-01T18:14:48,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741847_1023 (size=539) 2024-12-01T18:14:48,774 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076873736 to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/oldWALs/b8365d49b74c%2C45577%2C1733076798835.1733076873736 2024-12-01T18:14:48,782 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076883761 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076888771 2024-12-01T18:14:48,782 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35333:35333),(127.0.0.1/127.0.0.1:33153:33153)] 2024-12-01T18:14:48,782 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076883761 is not closed yet, will try archiving it next time 2024-12-01T18:14:48,782 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C45577%2C1733076798835:(num 1733076888771) roll requested 2024-12-01T18:14:48,783 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C45577%2C1733076798835.1733076888782 2024-12-01T18:14:48,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741849_1025 (size=1258) 2024-12-01T18:14:48,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741849_1025 (size=1258) 2024-12-01T18:14:48,790 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076888771 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076888782 2024-12-01T18:14:48,790 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33153:33153),(127.0.0.1/127.0.0.1:35333:35333)] 2024-12-01T18:14:48,790 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076888771 is not closed yet, will try archiving it next time 2024-12-01T18:14:48,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741850_1026 (size=93) 2024-12-01T18:14:48,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added 
to blk_1073741850_1026 (size=93) 2024-12-01T18:14:48,792 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835/b8365d49b74c%2C45577%2C1733076798835.1733076888771 to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/oldWALs/b8365d49b74c%2C45577%2C1733076798835.1733076888771 2024-12-01T18:14:51,759 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 5f48298de27791dfc9807e2f795ef8cf, had cached 0 bytes from a total of 27710 2024-12-01T18:15:00,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45577 {}] regionserver.HRegion(8581): Flush requested on 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:15:00,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5f48298de27791dfc9807e2f795ef8cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:15:00,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/8e535e1c3b614ba084d345b60759333a is 1080, key is row0022/info:/1733076888772/Put/seqid=0 2024-12-01T18:15:00,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741852_1028 (size=12509) 2024-12-01T18:15:00,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741852_1028 (size=12509) 2024-12-01T18:15:00,813 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/8e535e1c3b614ba084d345b60759333a 2024-12-01T18:15:00,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/8e535e1c3b614ba084d345b60759333a as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/8e535e1c3b614ba084d345b60759333a 2024-12-01T18:15:00,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/8e535e1c3b614ba084d345b60759333a, entries=7, sequenceid=42, filesize=12.2 K 2024-12-01T18:15:00,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5f48298de27791dfc9807e2f795ef8cf in 43ms, sequenceid=42, compaction requested=false 2024-12-01T18:15:00,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5f48298de27791dfc9807e2f795ef8cf: 2024-12-01T18:15:00,835 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=39.3 K, sizeToCheck=16.0 K 2024-12-01T18:15:00,835 DEBUG 
[MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:15:00,835 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/23063ee473d349ab9ee0f118a5abb353 because midkey is the same as first or last row 2024-12-01T18:15:08,799 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-01T18:15:08,800 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-01T18:15:08,800 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x50389d05 to 127.0.0.1:56284 2024-12-01T18:15:08,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:15:08,801 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T18:15:08,801 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=531063168, stopped=false 2024-12-01T18:15:08,802 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b8365d49b74c,41393,1733076798101 2024-12-01T18:15:08,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:15:08,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:15:08,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:08,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:08,805 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-01T18:15:08,805 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:15:08,805 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:15:08,805 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,45577,1733076798835' ***** 2024-12-01T18:15:08,805 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:15:08,805 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-01T18:15:08,805 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:15:08,806 INFO [RS:0;b8365d49b74c:45577 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
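The shutdown above is driven by a single signal: the running marker /hbase/running is deleted in ZooKeeper, every watcher gets a NodeDeleted event for it, and the region server starts its STOPPING sequence. To stay self-contained, the sketch below models the same pattern with a local marker file and a polling thread instead of a ZooKeeper watcher; the marker path, poll interval and stop hook are stand-ins for the illustration.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Self-contained stand-in for the "/hbase/running deleted -> begin STOPPING"
// pattern in the log: a background thread polls a cluster-up marker and runs
// a stop hook once the marker disappears.
public class ClusterUpMarkerWatcher {

    static Thread watch(Path marker, long pollMs, Runnable onClusterDown) {
        Thread watcher = new Thread(() -> {
            try {
                while (Files.exists(marker)) {
                    Thread.sleep(pollMs);
                }
                onClusterDown.run();          // the marker is gone: shut down
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }, "cluster-up-marker-watcher");
        watcher.start();
        return watcher;
    }

    public static void main(String[] args) throws IOException, InterruptedException {
        Path marker = Files.createTempFile("hbase-running-", ".marker");
        Thread watcher = watch(marker, 100,
                () -> System.out.println("***** STOPPING region server (marker removed) *****"));

        Thread.sleep(300);                    // the "cluster" runs for a moment
        Files.delete(marker);                 // the equivalent of deleting /hbase/running
        watcher.join();
    }
}
```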
2024-12-01T18:15:08,806 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-01T18:15:08,806 INFO [RS:0;b8365d49b74c:45577 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T18:15:08,806 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(3579): Received CLOSE for 3aeff3c0b3cd4a8b1d80782b2a0da031 2024-12-01T18:15:08,808 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(3579): Received CLOSE for 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:15:08,808 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,45577,1733076798835 2024-12-01T18:15:08,808 DEBUG [RS:0;b8365d49b74c:45577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:15:08,808 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:15:08,808 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:15:08,808 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T18:15:08,808 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-01T18:15:08,809 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-01T18:15:08,809 DEBUG [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1603): Online Regions={3aeff3c0b3cd4a8b1d80782b2a0da031=hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031., 5f48298de27791dfc9807e2f795ef8cf=TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf., 1588230740=hbase:meta,,1.1588230740} 2024-12-01T18:15:08,809 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3aeff3c0b3cd4a8b1d80782b2a0da031, disabling compactions & flushes 2024-12-01T18:15:08,809 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 2024-12-01T18:15:08,809 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 2024-12-01T18:15:08,809 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. after waiting 0 ms 2024-12-01T18:15:08,809 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 
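The close sequence above runs in a fixed order: take the region's close lock so no new update can start, mark updates disabled, then perform one last memstore flush so nothing remains only in the WAL. The sketch below compresses that into a small class built on a read-write lock, with writers on the shared side and close() on the exclusive side; the class, its in-memory "memstore" and the exception it throws are invented for the example.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Compressed sketch of the close protocol in the log: writers share the lock,
// close() takes it exclusively, disables updates, then flushes what is left.
public class ClosableRegion {

    private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
    private final List<String> memstore = new ArrayList<>();
    private volatile boolean closed = false;

    void put(String cell) {
        closeLock.readLock().lock();            // many writers may hold this side at once
        try {
            if (closed) {
                throw new IllegalStateException("region is closing");
            }
            memstore.add(cell);
        } finally {
            closeLock.readLock().unlock();
        }
    }

    void close() {
        closeLock.writeLock().lock();           // "Acquired close lock"
        try {
            closed = true;                      // "Updates disabled for region"
            System.out.println("final flush of " + memstore.size() + " cell(s)");
            memstore.clear();                   // stands in for the flush to an HFile
        } finally {
            closeLock.writeLock().unlock();
        }
    }

    public static void main(String[] args) {
        ClosableRegion region = new ClosableRegion();
        region.put("info:d/default");
        region.close();
        try {
            region.put("too-late");
        } catch (IllegalStateException e) {
            System.out.println("rejected after close: " + e.getMessage());
        }
    }
}
```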
2024-12-01T18:15:08,809 DEBUG [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3aeff3c0b3cd4a8b1d80782b2a0da031, 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:15:08,809 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 3aeff3c0b3cd4a8b1d80782b2a0da031 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-01T18:15:08,810 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:15:08,810 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:15:08,810 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:15:08,810 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:15:08,810 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:15:08,811 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.81 KB heapSize=5.32 KB 2024-12-01T18:15:08,851 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/namespace/3aeff3c0b3cd4a8b1d80782b2a0da031/.tmp/info/1883043d33f942fea77ca15c89ed310a is 45, key is default/info:d/1733076801193/Put/seqid=0 2024-12-01T18:15:08,886 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/.tmp/info/ed3f7bdf35d84d8fbf6948fc50c8f958 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf./info:regioninfo/1733076801784/Put/seqid=0 2024-12-01T18:15:08,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741853_1029 (size=5037) 2024-12-01T18:15:08,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741853_1029 (size=5037) 2024-12-01T18:15:08,956 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/namespace/3aeff3c0b3cd4a8b1d80782b2a0da031/.tmp/info/1883043d33f942fea77ca15c89ed310a 2024-12-01T18:15:08,965 INFO [regionserver/b8365d49b74c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-01T18:15:08,965 INFO [regionserver/b8365d49b74c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-01T18:15:08,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38223 is added to blk_1073741854_1030 (size=8172) 2024-12-01T18:15:09,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741854_1030 (size=8172) 2024-12-01T18:15:09,001 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.59 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/.tmp/info/ed3f7bdf35d84d8fbf6948fc50c8f958 2024-12-01T18:15:09,010 DEBUG [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3aeff3c0b3cd4a8b1d80782b2a0da031, 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:15:09,012 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/namespace/3aeff3c0b3cd4a8b1d80782b2a0da031/.tmp/info/1883043d33f942fea77ca15c89ed310a as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/namespace/3aeff3c0b3cd4a8b1d80782b2a0da031/info/1883043d33f942fea77ca15c89ed310a 2024-12-01T18:15:09,022 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/namespace/3aeff3c0b3cd4a8b1d80782b2a0da031/info/1883043d33f942fea77ca15c89ed310a, entries=2, sequenceid=6, filesize=4.9 K 2024-12-01T18:15:09,024 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 3aeff3c0b3cd4a8b1d80782b2a0da031 in 215ms, sequenceid=6, compaction requested=false 2024-12-01T18:15:09,092 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/namespace/3aeff3c0b3cd4a8b1d80782b2a0da031/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T18:15:09,096 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 2024-12-01T18:15:09,096 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3aeff3c0b3cd4a8b1d80782b2a0da031: 2024-12-01T18:15:09,096 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733076800641.3aeff3c0b3cd4a8b1d80782b2a0da031. 2024-12-01T18:15:09,096 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 5f48298de27791dfc9807e2f795ef8cf, disabling compactions & flushes 2024-12-01T18:15:09,097 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 
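The "Wrote file=.../recovered.edits/9.seqid, newMaxSeqId=9" entry above is the last step of a clean close: an empty marker file whose name is the region's highest flushed sequence id, so a later open knows that edits up to that id are already in store files and need not be replayed from the WAL. The sketch below writes and reads back such a marker on the local filesystem; the directory layout and helper names are simplified for the illustration.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;

// Sketch of the "<maxSeqId>.seqid" marker written on clean region close:
// the file is empty, the information is entirely in its name.
public class SeqIdMarker {

    static Path writeMarker(Path recoveredEditsDir, long maxSeqId) throws IOException {
        Files.createDirectories(recoveredEditsDir);
        return Files.createFile(recoveredEditsDir.resolve(maxSeqId + ".seqid"));
    }

    // On open, the highest marker tells us which WAL edits can be skipped.
    static long readMaxSeqId(Path recoveredEditsDir) throws IOException {
        try (Stream<Path> files = Files.list(recoveredEditsDir)) {
            return files.map(p -> p.getFileName().toString())
                        .filter(name -> name.endsWith(".seqid"))
                        .mapToLong(name -> Long.parseLong(
                                name.substring(0, name.length() - ".seqid".length())))
                        .max()
                        .orElse(-1L);
        }
    }

    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("region").resolve("recovered.edits");
        writeMarker(dir, 9L);                       // newMaxSeqId=9, as in the log
        System.out.println("replay WAL edits with seqid > " + readMaxSeqId(dir));
    }
}
```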
2024-12-01T18:15:09,097 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 2024-12-01T18:15:09,097 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. after waiting 0 ms 2024-12-01T18:15:09,097 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 2024-12-01T18:15:09,097 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 5f48298de27791dfc9807e2f795ef8cf 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-01T18:15:09,098 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/.tmp/table/2f8b986fc6e94370bb2087ef615c104e is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733076801796/Put/seqid=0 2024-12-01T18:15:09,112 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/743705f8bfdd43b7996cfdfa8021c88e is 1080, key is row0029/info:/1733076902792/Put/seqid=0 2024-12-01T18:15:09,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741855_1031 (size=5452) 2024-12-01T18:15:09,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741855_1031 (size=5452) 2024-12-01T18:15:09,158 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=232 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/.tmp/table/2f8b986fc6e94370bb2087ef615c104e 2024-12-01T18:15:09,169 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/.tmp/info/ed3f7bdf35d84d8fbf6948fc50c8f958 as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/info/ed3f7bdf35d84d8fbf6948fc50c8f958 2024-12-01T18:15:09,179 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/info/ed3f7bdf35d84d8fbf6948fc50c8f958, entries=20, sequenceid=14, filesize=8.0 K 2024-12-01T18:15:09,180 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/.tmp/table/2f8b986fc6e94370bb2087ef615c104e as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/table/2f8b986fc6e94370bb2087ef615c104e 2024-12-01T18:15:09,190 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/table/2f8b986fc6e94370bb2087ef615c104e, entries=4, sequenceid=14, filesize=5.3 K 2024-12-01T18:15:09,192 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.81 KB/2882, heapSize ~5.04 KB/5160, currentSize=0 B/0 for 1588230740 in 381ms, sequenceid=14, compaction requested=false 2024-12-01T18:15:09,210 DEBUG [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 5f48298de27791dfc9807e2f795ef8cf 2024-12-01T18:15:09,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741856_1032 (size=8193) 2024-12-01T18:15:09,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741856_1032 (size=8193) 2024-12-01T18:15:09,224 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/743705f8bfdd43b7996cfdfa8021c88e 2024-12-01T18:15:09,253 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/.tmp/info/743705f8bfdd43b7996cfdfa8021c88e as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/743705f8bfdd43b7996cfdfa8021c88e 2024-12-01T18:15:09,263 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/743705f8bfdd43b7996cfdfa8021c88e, entries=3, sequenceid=48, filesize=8.0 K 2024-12-01T18:15:09,265 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 5f48298de27791dfc9807e2f795ef8cf in 168ms, sequenceid=48, compaction requested=true 2024-12-01T18:15:09,269 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 2024-12-01T18:15:09,270 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 
{event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T18:15:09,270 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-01T18:15:09,271 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:15:09,271 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T18:15:09,278 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/4b68f3c6fbf7402195bfa0975885daaa, hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/a7bde9b85c8c4eefa188e06b3c343ac8, hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/d09d328c5bfa417b8499756b7fbe1c6e] to archive 2024-12-01T18:15:09,288 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-01T18:15:09,292 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/4b68f3c6fbf7402195bfa0975885daaa to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/archive/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/4b68f3c6fbf7402195bfa0975885daaa 2024-12-01T18:15:09,294 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/a7bde9b85c8c4eefa188e06b3c343ac8 to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/archive/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/a7bde9b85c8c4eefa188e06b3c343ac8 2024-12-01T18:15:09,296 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/d09d328c5bfa417b8499756b7fbe1c6e to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/archive/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/info/d09d328c5bfa417b8499756b7fbe1c6e 2024-12-01T18:15:09,362 DEBUG 
[RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/data/default/TestLogRolling-testSlowSyncLogRolling/5f48298de27791dfc9807e2f795ef8cf/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-01T18:15:09,363 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 2024-12-01T18:15:09,364 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 5f48298de27791dfc9807e2f795ef8cf: 2024-12-01T18:15:09,364 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733076801386.5f48298de27791dfc9807e2f795ef8cf. 2024-12-01T18:15:09,411 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,45577,1733076798835; all regions closed. 2024-12-01T18:15:09,428 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835 2024-12-01T18:15:09,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741834_1010 (size=4330) 2024-12-01T18:15:09,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741834_1010 (size=4330) 2024-12-01T18:15:09,436 DEBUG [RS:0;b8365d49b74c:45577 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/oldWALs 2024-12-01T18:15:09,436 INFO [RS:0;b8365d49b74c:45577 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b8365d49b74c%2C45577%2C1733076798835.meta:.meta(num 1733076800404) 2024-12-01T18:15:09,453 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/WALs/b8365d49b74c,45577,1733076798835 2024-12-01T18:15:09,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741851_1027 (size=13066) 2024-12-01T18:15:09,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741851_1027 (size=13066) 2024-12-01T18:15:09,465 DEBUG [RS:0;b8365d49b74c:45577 {}] wal.AbstractFSWAL(1071): Moved 3 WAL file(s) to /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/oldWALs 2024-12-01T18:15:09,465 INFO [RS:0;b8365d49b74c:45577 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b8365d49b74c%2C45577%2C1733076798835:(num 1733076888782) 2024-12-01T18:15:09,465 DEBUG [RS:0;b8365d49b74c:45577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:15:09,465 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:15:09,466 INFO [RS:0;b8365d49b74c:45577 {}] hbase.ChoreService(370): Chore service for: regionserver/b8365d49b74c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-01T18:15:09,467 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller 
exiting. 2024-12-01T18:15:09,467 INFO [RS:0;b8365d49b74c:45577 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45577 2024-12-01T18:15:09,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:15:09,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b8365d49b74c,45577,1733076798835 2024-12-01T18:15:09,474 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b8365d49b74c,45577,1733076798835] 2024-12-01T18:15:09,474 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b8365d49b74c,45577,1733076798835; numProcessing=1 2024-12-01T18:15:09,476 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b8365d49b74c,45577,1733076798835 already deleted, retry=false 2024-12-01T18:15:09,476 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b8365d49b74c,45577,1733076798835 expired; onlineServers=0 2024-12-01T18:15:09,476 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,41393,1733076798101' ***** 2024-12-01T18:15:09,476 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T18:15:09,476 DEBUG [M:0;b8365d49b74c:41393 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51fe3b4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:15:09,477 INFO [M:0;b8365d49b74c:41393 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,41393,1733076798101 2024-12-01T18:15:09,477 INFO [M:0;b8365d49b74c:41393 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,41393,1733076798101; all regions closed. 2024-12-01T18:15:09,477 DEBUG [M:0;b8365d49b74c:41393 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:15:09,477 DEBUG [M:0;b8365d49b74c:41393 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T18:15:09,477 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-01T18:15:09,477 DEBUG [M:0;b8365d49b74c:41393 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T18:15:09,477 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733076799710 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733076799710,5,FailOnTimeoutGroup] 2024-12-01T18:15:09,477 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733076799710 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733076799710,5,FailOnTimeoutGroup] 2024-12-01T18:15:09,477 INFO [M:0;b8365d49b74c:41393 {}] hbase.ChoreService(370): Chore service for: master/b8365d49b74c:0 had [] on shutdown 2024-12-01T18:15:09,477 DEBUG [M:0;b8365d49b74c:41393 {}] master.HMaster(1733): Stopping service threads 2024-12-01T18:15:09,478 INFO [M:0;b8365d49b74c:41393 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T18:15:09,478 INFO [M:0;b8365d49b74c:41393 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T18:15:09,478 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-01T18:15:09,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T18:15:09,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:09,492 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:15:09,492 DEBUG [M:0;b8365d49b74c:41393 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/master already deleted, retry=false 2024-12-01T18:15:09,492 DEBUG [M:0;b8365d49b74c:41393 {}] master.ActiveMasterManager(353): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-01T18:15:09,492 INFO [M:0;b8365d49b74c:41393 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-01T18:15:09,493 INFO [M:0;b8365d49b74c:41393 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T18:15:09,493 DEBUG [M:0;b8365d49b74c:41393 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:15:09,493 INFO [M:0;b8365d49b74c:41393 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:15:09,493 DEBUG [M:0;b8365d49b74c:41393 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:15:09,493 DEBUG [M:0;b8365d49b74c:41393 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-01T18:15:09,493 DEBUG [M:0;b8365d49b74c:41393 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:15:09,494 INFO [M:0;b8365d49b74c:41393 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.22 KB heapSize=50.15 KB 2024-12-01T18:15:09,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:15:09,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x1004ec900550001, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:15:09,579 INFO [RS:0;b8365d49b74c:45577 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,45577,1733076798835; zookeeper connection closed. 2024-12-01T18:15:09,592 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@56149408 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@56149408 2024-12-01T18:15:09,593 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-01T18:15:09,595 DEBUG [M:0;b8365d49b74c:41393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a4622606d181427baa58406b4a2e8dba is 82, key is hbase:meta,,1/info:regioninfo/1733076800531/Put/seqid=0 2024-12-01T18:15:09,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741857_1033 (size=5672) 2024-12-01T18:15:09,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741857_1033 (size=5672) 2024-12-01T18:15:09,685 INFO [M:0;b8365d49b74c:41393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a4622606d181427baa58406b4a2e8dba 2024-12-01T18:15:09,780 DEBUG [M:0;b8365d49b74c:41393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c49577e5ba9d4868b8c2a3166639e0a8 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733076801803/Put/seqid=0 2024-12-01T18:15:09,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741858_1034 (size=6427) 2024-12-01T18:15:09,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741858_1034 (size=6427) 2024-12-01T18:15:09,869 INFO [M:0;b8365d49b74c:41393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.62 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c49577e5ba9d4868b8c2a3166639e0a8 2024-12-01T18:15:09,879 
INFO [M:0;b8365d49b74c:41393 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c49577e5ba9d4868b8c2a3166639e0a8 2024-12-01T18:15:09,891 INFO [regionserver/b8365d49b74c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:15:09,928 DEBUG [M:0;b8365d49b74c:41393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/22299c58195940cfb22a725772359cb2 is 69, key is b8365d49b74c,45577,1733076798835/rs:state/1733076799814/Put/seqid=0 2024-12-01T18:15:09,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741859_1035 (size=5156) 2024-12-01T18:15:09,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741859_1035 (size=5156) 2024-12-01T18:15:09,952 INFO [M:0;b8365d49b74c:41393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/22299c58195940cfb22a725772359cb2 2024-12-01T18:15:09,987 DEBUG [M:0;b8365d49b74c:41393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/287dc274c99f4c738dd3a197c126ca1e is 52, key is load_balancer_on/state:d/1733076801368/Put/seqid=0 2024-12-01T18:15:09,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741860_1036 (size=5056) 2024-12-01T18:15:09,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741860_1036 (size=5056) 2024-12-01T18:15:09,995 INFO [M:0;b8365d49b74c:41393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/287dc274c99f4c738dd3a197c126ca1e 2024-12-01T18:15:10,003 DEBUG [M:0;b8365d49b74c:41393 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a4622606d181427baa58406b4a2e8dba as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a4622606d181427baa58406b4a2e8dba 2024-12-01T18:15:10,011 INFO [M:0;b8365d49b74c:41393 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a4622606d181427baa58406b4a2e8dba, entries=8, sequenceid=104, filesize=5.5 K 2024-12-01T18:15:10,012 DEBUG [M:0;b8365d49b74c:41393 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c49577e5ba9d4868b8c2a3166639e0a8 
as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c49577e5ba9d4868b8c2a3166639e0a8 2024-12-01T18:15:10,020 INFO [M:0;b8365d49b74c:41393 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c49577e5ba9d4868b8c2a3166639e0a8 2024-12-01T18:15:10,020 INFO [M:0;b8365d49b74c:41393 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c49577e5ba9d4868b8c2a3166639e0a8, entries=11, sequenceid=104, filesize=6.3 K 2024-12-01T18:15:10,023 DEBUG [M:0;b8365d49b74c:41393 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/22299c58195940cfb22a725772359cb2 as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/22299c58195940cfb22a725772359cb2 2024-12-01T18:15:10,031 INFO [M:0;b8365d49b74c:41393 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/22299c58195940cfb22a725772359cb2, entries=1, sequenceid=104, filesize=5.0 K 2024-12-01T18:15:10,033 DEBUG [M:0;b8365d49b74c:41393 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/287dc274c99f4c738dd3a197c126ca1e as hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/287dc274c99f4c738dd3a197c126ca1e 2024-12-01T18:15:10,041 INFO [M:0;b8365d49b74c:41393 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/287dc274c99f4c738dd3a197c126ca1e, entries=1, sequenceid=104, filesize=4.9 K 2024-12-01T18:15:10,042 INFO [M:0;b8365d49b74c:41393 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.22 KB/41185, heapSize ~50.09 KB/51288, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 549ms, sequenceid=104, compaction requested=false 2024-12-01T18:15:10,051 INFO [M:0;b8365d49b74c:41393 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:15:10,051 DEBUG [M:0;b8365d49b74c:41393 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:15:10,053 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/MasterData/WALs/b8365d49b74c,41393,1733076798101 2024-12-01T18:15:10,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38223 is added to blk_1073741830_1006 (size=48486) 2024-12-01T18:15:10,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34749 is added to blk_1073741830_1006 (size=48486) 2024-12-01T18:15:10,065 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-01T18:15:10,065 INFO [M:0;b8365d49b74c:41393 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-01T18:15:10,066 INFO [M:0;b8365d49b74c:41393 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41393 2024-12-01T18:15:10,071 DEBUG [M:0;b8365d49b74c:41393 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b8365d49b74c,41393,1733076798101 already deleted, retry=false 2024-12-01T18:15:10,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:15:10,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41393-0x1004ec900550000, quorum=127.0.0.1:56284, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:15:10,177 INFO [M:0;b8365d49b74c:41393 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,41393,1733076798101; zookeeper connection closed. 2024-12-01T18:15:10,185 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c482eac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:15:10,189 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@408e1d66{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:15:10,189 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:15:10,189 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4debea22{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:15:10,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@670df016{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/hadoop.log.dir/,STOPPED} 2024-12-01T18:15:10,196 WARN [BP-1507522640-172.17.0.2-1733076794999 heartbeating to localhost/127.0.0.1:39817 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:15:10,196 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:15:10,196 WARN [BP-1507522640-172.17.0.2-1733076794999 heartbeating to localhost/127.0.0.1:39817 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1507522640-172.17.0.2-1733076794999 (Datanode Uuid 54747de7-1696-43b2-a754-56ff5599f560) service to localhost/127.0.0.1:39817 2024-12-01T18:15:10,196 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:15:10,197 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6/dfs/data/data3/current/BP-1507522640-172.17.0.2-1733076794999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:15:10,198 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6/dfs/data/data4/current/BP-1507522640-172.17.0.2-1733076794999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:15:10,198 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:15:10,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c4b1b4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:15:10,205 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9fb4bbe{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:15:10,205 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:15:10,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2276bd44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:15:10,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3cf7922e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/hadoop.log.dir/,STOPPED} 2024-12-01T18:15:10,210 WARN [BP-1507522640-172.17.0.2-1733076794999 heartbeating to localhost/127.0.0.1:39817 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:15:10,210 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:15:10,210 WARN [BP-1507522640-172.17.0.2-1733076794999 heartbeating to localhost/127.0.0.1:39817 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1507522640-172.17.0.2-1733076794999 (Datanode Uuid acb88787-459b-4d82-951e-73e255f8eccb) service to localhost/127.0.0.1:39817 2024-12-01T18:15:10,210 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:15:10,211 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6/dfs/data/data1/current/BP-1507522640-172.17.0.2-1733076794999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:15:10,211 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/cluster_c1d378f7-0a5d-f163-9b2b-755009fab9b6/dfs/data/data2/current/BP-1507522640-172.17.0.2-1733076794999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:15:10,211 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:15:10,226 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5682c4d1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:15:10,227 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ff1a6c1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:15:10,227 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:15:10,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74468826{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:15:10,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@88aab13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/hadoop.log.dir/,STOPPED} 2024-12-01T18:15:10,240 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-01T18:15:10,311 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-01T18:15:10,325 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=61 (was 12) Potentially hanging thread: IPC Client (1454462474) connection to localhost/127.0.0.1:39817 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1454462474) connection to localhost/127.0.0.1:39817 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39817 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@66029389 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39817 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/b8365d49b74c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1454462474) connection to localhost/127.0.0.1:39817 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:39817 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) 
app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39817 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/b8365d49b74c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39817 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/b8365d49b74c:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: RS-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=403 (was 286) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=169 (was 197), ProcessCount=11 (was 11), AvailableMemoryMB=2943 (was 3702) 2024-12-01T18:15:10,333 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=62, OpenFileDescriptor=403, MaxFileDescriptor=1048576, SystemLoadAverage=169, ProcessCount=11, AvailableMemoryMB=2943 2024-12-01T18:15:10,333 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T18:15:10,333 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/hadoop.log.dir so I do NOT create it in target/test-data/618a53fe-0237-aba8-2017-c601437a2fec 2024-12-01T18:15:10,333 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/57276c8b-3711-3e55-a6d2-c24c25cfce3a/hadoop.tmp.dir so I do NOT create it in target/test-data/618a53fe-0237-aba8-2017-c601437a2fec 2024-12-01T18:15:10,334 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548, deleteOnExit=true 2024-12-01T18:15:10,334 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-01T18:15:10,334 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/test.cache.data in system properties and HBase conf 2024-12-01T18:15:10,334 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T18:15:10,334 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir in system properties and HBase conf 2024-12-01T18:15:10,334 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T18:15:10,334 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T18:15:10,334 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-01T18:15:10,335 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-01T18:15:10,335 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:15:10,335 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:15:10,335 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T18:15:10,335 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:15:10,335 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T18:15:10,335 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T18:15:10,335 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:15:10,336 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:15:10,336 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T18:15:10,336 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/nfs.dump.dir in system properties and HBase conf 2024-12-01T18:15:10,336 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/java.io.tmpdir in system properties and HBase conf 2024-12-01T18:15:10,336 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:15:10,336 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T18:15:10,336 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T18:15:10,355 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:15:10,450 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:15:10,474 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:15:10,476 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:15:10,476 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:15:10,476 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:15:10,477 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:15:10,480 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49bb953b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:15:10,481 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25d9b335{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:15:10,654 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@722cfddf{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/java.io.tmpdir/jetty-localhost-33895-hadoop-hdfs-3_4_1-tests_jar-_-any-10843119109057773961/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:15:10,655 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@411ed8c5{HTTP/1.1, (http/1.1)}{localhost:33895} 2024-12-01T18:15:10,656 INFO [Time-limited test {}] server.Server(415): Started @117949ms 2024-12-01T18:15:10,676 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:15:10,852 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:15:10,861 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:15:10,863 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:15:10,863 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:15:10,863 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T18:15:10,864 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@415f9d92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:15:10,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ad3a453{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:15:11,024 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e1ad401{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/java.io.tmpdir/jetty-localhost-40303-hadoop-hdfs-3_4_1-tests_jar-_-any-16409037629376835932/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:15:11,025 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5c5873de{HTTP/1.1, (http/1.1)}{localhost:40303} 2024-12-01T18:15:11,026 INFO [Time-limited test {}] server.Server(415): Started @118319ms 2024-12-01T18:15:11,028 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:15:11,072 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:15:11,076 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:15:11,080 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:15:11,080 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:15:11,080 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:15:11,081 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bddbe80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:15:11,081 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e043b3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:15:11,156 WARN [Thread-449 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data1/current/BP-1689502036-172.17.0.2-1733076910377/current, will proceed with Du for space computation calculation, 2024-12-01T18:15:11,157 WARN [Thread-450 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data2/current/BP-1689502036-172.17.0.2-1733076910377/current, will proceed with Du for space computation calculation, 2024-12-01T18:15:11,186 WARN [Thread-428 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:15:11,191 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ebb1b6e6bf9194 with lease ID 0x239788ce6d12094f: Processing first storage report for DS-9d0eec45-d214-45f6-8cdb-09297d74690d from datanode DatanodeRegistration(127.0.0.1:35165, datanodeUuid=3b98803e-ebf7-4974-b1ec-c762ea0c3bbb, infoPort=43481, infoSecurePort=0, ipcPort=43445, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377) 2024-12-01T18:15:11,191 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ebb1b6e6bf9194 with lease ID 0x239788ce6d12094f: from storage DS-9d0eec45-d214-45f6-8cdb-09297d74690d node DatanodeRegistration(127.0.0.1:35165, datanodeUuid=3b98803e-ebf7-4974-b1ec-c762ea0c3bbb, infoPort=43481, infoSecurePort=0, ipcPort=43445, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T18:15:11,191 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ebb1b6e6bf9194 with lease ID 0x239788ce6d12094f: Processing first storage report for DS-57d3a703-8b25-4daf-bb9b-4f0781d9b9b8 from datanode DatanodeRegistration(127.0.0.1:35165, datanodeUuid=3b98803e-ebf7-4974-b1ec-c762ea0c3bbb, infoPort=43481, infoSecurePort=0, ipcPort=43445, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377) 2024-12-01T18:15:11,191 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ebb1b6e6bf9194 with lease ID 0x239788ce6d12094f: from storage DS-57d3a703-8b25-4daf-bb9b-4f0781d9b9b8 node DatanodeRegistration(127.0.0.1:35165, datanodeUuid=3b98803e-ebf7-4974-b1ec-c762ea0c3bbb, infoPort=43481, infoSecurePort=0, ipcPort=43445, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:15:11,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3666a50b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/java.io.tmpdir/jetty-localhost-32907-hadoop-hdfs-3_4_1-tests_jar-_-any-5149726948214647980/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:15:11,231 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3a9cfc80{HTTP/1.1, (http/1.1)}{localhost:32907} 2024-12-01T18:15:11,231 INFO [Time-limited test {}] server.Server(415): Started @118525ms 2024-12-01T18:15:11,233 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
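The records above correspond to HBaseTestingUtility bringing up the HDFS side of the minicluster (a namenode plus two datanodes, each with its own Jetty web UI and block-report cycle) for regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath. As a minimal sketch only, assuming the standard branch-2 test utility API and reusing the option values reported earlier in this log (numMasters=1, numRegionServers=1, numDataNodes=2), the test-side setup that produces this kind of startup sequence might look like the following; the class name and test body here are hypothetical.

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSetupSketch {
      // Hypothetical harness; the real test lives in
      // org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.
      private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

      public static void main(String[] args) throws Exception {
        // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2}
        // as logged by HBaseTestingUtility(1126) above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .build();
        TEST_UTIL.startMiniCluster(option);   // starts DFS, ZooKeeper, master and regionserver
        try {
          // ... test body would stop a datanode and verify the WAL rolls ...
        } finally {
          TEST_UTIL.shutdownMiniCluster();    // releases the threads and file descriptors
                                              // that ResourceChecker accounts for above
        }
      }
    }
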
2024-12-01T18:15:11,346 WARN [Thread-475 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data3/current/BP-1689502036-172.17.0.2-1733076910377/current, will proceed with Du for space computation calculation, 2024-12-01T18:15:11,348 WARN [Thread-476 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data4/current/BP-1689502036-172.17.0.2-1733076910377/current, will proceed with Du for space computation calculation, 2024-12-01T18:15:11,383 WARN [Thread-464 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:15:11,386 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd686478c37f5685 with lease ID 0x239788ce6d120950: Processing first storage report for DS-25c71848-679d-454c-a829-41c1af7b9da9 from datanode DatanodeRegistration(127.0.0.1:41477, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42105, infoSecurePort=0, ipcPort=32805, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377) 2024-12-01T18:15:11,386 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd686478c37f5685 with lease ID 0x239788ce6d120950: from storage DS-25c71848-679d-454c-a829-41c1af7b9da9 node DatanodeRegistration(127.0.0.1:41477, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42105, infoSecurePort=0, ipcPort=32805, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:15:11,386 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd686478c37f5685 with lease ID 0x239788ce6d120950: Processing first storage report for DS-c0ce54a4-cbac-4853-98c7-789f9f542539 from datanode DatanodeRegistration(127.0.0.1:41477, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42105, infoSecurePort=0, ipcPort=32805, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377) 2024-12-01T18:15:11,387 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd686478c37f5685 with lease ID 0x239788ce6d120950: from storage DS-c0ce54a4-cbac-4853-98c7-789f9f542539 node DatanodeRegistration(127.0.0.1:41477, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42105, infoSecurePort=0, ipcPort=32805, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:15:11,468 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec 2024-12-01T18:15:11,472 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/zookeeper_0, clientPort=64640, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T18:15:11,473 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=64640 2024-12-01T18:15:11,473 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:15:11,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:15:11,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35165 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:15:11,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41477 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:15:11,493 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def with version=8 2024-12-01T18:15:11,493 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/hbase-staging 2024-12-01T18:15:11,496 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:15:11,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:15:11,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:15:11,496 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:15:11,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:15:11,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:15:11,497 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:15:11,497 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:15:11,498 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41935 2024-12-01T18:15:11,498 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:15:11,500 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:15:11,504 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:41935 connecting to ZooKeeper ensemble=127.0.0.1:64640 2024-12-01T18:15:11,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:419350x0, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:15:11,513 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41935-0x1004ecabe550000 connected 2024-12-01T18:15:11,536 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:15:11,537 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:15:11,537 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:15:11,537 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41935 2024-12-01T18:15:11,540 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41935 2024-12-01T18:15:11,542 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41935 2024-12-01T18:15:11,544 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41935 2024-12-01T18:15:11,546 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41935 2024-12-01T18:15:11,546 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def, hbase.cluster.distributed=false 2024-12-01T18:15:11,564 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:15:11,565 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:15:11,565 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:15:11,565 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:15:11,565 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:15:11,565 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:15:11,565 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:15:11,565 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:15:11,566 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33203 2024-12-01T18:15:11,566 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:15:11,569 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:15:11,569 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:15:11,572 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:15:11,575 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33203 connecting to ZooKeeper ensemble=127.0.0.1:64640 2024-12-01T18:15:11,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:332030x0, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:15:11,582 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:15:11,582 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33203-0x1004ecabe550001 connected 2024-12-01T18:15:11,583 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:15:11,584 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:15:11,587 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33203 2024-12-01T18:15:11,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33203 2024-12-01T18:15:11,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33203 2024-12-01T18:15:11,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 
with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33203 2024-12-01T18:15:11,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33203 2024-12-01T18:15:11,596 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b8365d49b74c,41935,1733076911495 2024-12-01T18:15:11,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:15:11,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:15:11,599 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b8365d49b74c,41935,1733076911495 2024-12-01T18:15:11,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:15:11,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:11,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:15:11,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:11,605 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:15:11,606 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:15:11,606 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b8365d49b74c,41935,1733076911495 from backup master directory 2024-12-01T18:15:11,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:15:11,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b8365d49b74c,41935,1733076911495 2024-12-01T18:15:11,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:15:11,609 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T18:15:11,609 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b8365d49b74c,41935,1733076911495 2024-12-01T18:15:11,612 DEBUG [M:0;b8365d49b74c:41935 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b8365d49b74c:41935 2024-12-01T18:15:11,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35165 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:15:11,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41477 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:15:11,632 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/hbase.id with ID: 89bfb79c-73b2-4f05-81fd-da18a0cbbdfd 2024-12-01T18:15:11,646 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:15:11,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:11,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:11,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41477 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:15:11,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35165 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:15:11,660 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
{NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:15:11,661 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T18:15:11,661 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:15:11,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41477 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:15:11,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35165 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:15:11,672 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store 2024-12-01T18:15:11,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41477 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:15:11,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35165 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:15:11,680 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:15:11,680 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:15:11,680 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:15:11,680 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:15:11,680 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:15:11,680 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:15:11,680 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:15:11,680 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:15:11,681 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/.initializing 2024-12-01T18:15:11,681 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495 2024-12-01T18:15:11,684 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C41935%2C1733076911495, suffix=, logDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495, archiveDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/oldWALs, maxLogs=10 2024-12-01T18:15:11,685 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41935%2C1733076911495.1733076911684 2024-12-01T18:15:11,697 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495/b8365d49b74c%2C41935%2C1733076911495.1733076911684 2024-12-01T18:15:11,697 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43481:43481),(127.0.0.1/127.0.0.1:42105:42105)] 2024-12-01T18:15:11,697 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:15:11,697 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:15:11,698 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:15:11,698 DEBUG [master/b8365d49b74c:0:becomeActiveMaster 
{}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:15:11,699 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:15:11,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T18:15:11,701 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:11,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:15:11,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:15:11,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T18:15:11,703 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:11,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:15:11,704 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:15:11,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T18:15:11,705 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:11,706 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:15:11,706 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:15:11,708 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T18:15:11,708 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:11,709 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:15:11,710 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:15:11,710 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:15:11,713 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T18:15:11,714 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:15:11,717 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:15:11,718 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753472, jitterRate=-0.04191109538078308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:15:11,719 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:15:11,719 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T18:15:11,723 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72fb05c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:15:11,724 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-01T18:15:11,725 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T18:15:11,725 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T18:15:11,725 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
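The FlushLargeStoresPolicy record above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the master:store table, so the lower bound falls back to the region memstore flush size divided by the number of column families (32.0 M here). A minimal sketch, not taken from this test run, of how that key could be set explicitly on a client-side Configuration; the key name comes from the log message, while the 16 MB value is an illustrative assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // Default HBase configuration (hbase-default.xml plus any hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();
            // Key name taken from the log above; the 16 MB value is illustrative only.
            conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
            System.out.println(conf.getLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", -1L));
        }
    }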
2024-12-01T18:15:11,725 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-01T18:15:11,726 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-01T18:15:11,726 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T18:15:11,728 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-01T18:15:11,729 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T18:15:11,730 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-01T18:15:11,731 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T18:15:11,731 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T18:15:11,733 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-01T18:15:11,734 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T18:15:11,734 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T18:15:11,736 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-01T18:15:11,736 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T18:15:11,738 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T18:15:11,740 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T18:15:11,741 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T18:15:11,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/running 2024-12-01T18:15:11,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:11,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:15:11,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:11,743 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b8365d49b74c,41935,1733076911495, sessionid=0x1004ecabe550000, setting cluster-up flag (Was=false) 2024-12-01T18:15:11,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:11,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:11,753 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T18:15:11,754 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,41935,1733076911495 2024-12-01T18:15:11,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:11,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:11,764 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T18:15:11,765 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,41935,1733076911495 2024-12-01T18:15:11,768 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-01T18:15:11,768 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-01T18:15:11,769 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, 
TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-01T18:15:11,769 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b8365d49b74c,41935,1733076911495 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T18:15:11,769 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:15:11,769 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:15:11,769 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:15:11,769 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:15:11,769 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b8365d49b74c:0, corePoolSize=10, maxPoolSize=10 2024-12-01T18:15:11,769 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,769 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:15:11,769 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,770 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733076941770 2024-12-01T18:15:11,770 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T18:15:11,770 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T18:15:11,770 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T18:15:11,770 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T18:15:11,771 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T18:15:11,771 INFO 
[master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T18:15:11,771 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:11,771 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:15:11,771 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-01T18:15:11,771 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T18:15:11,771 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T18:15:11,772 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T18:15:11,772 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T18:15:11,772 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T18:15:11,772 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733076911772,5,FailOnTimeoutGroup] 2024-12-01T18:15:11,773 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:11,773 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:15:11,774 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733076911773,5,FailOnTimeoutGroup] 2024-12-01T18:15:11,774 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:11,774 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T18:15:11,774 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:11,774 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:11,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41477 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:15:11,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35165 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:15:11,781 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-01T18:15:11,782 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def 2024-12-01T18:15:11,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35165 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:15:11,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41477 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:15:11,792 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:15:11,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:15:11,796 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:15:11,796 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:11,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:15:11,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:15:11,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:15:11,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:11,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:15:11,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:15:11,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:15:11,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:11,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:15:11,804 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/hbase/meta/1588230740 2024-12-01T18:15:11,804 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/hbase/meta/1588230740 2024-12-01T18:15:11,806 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-01T18:15:11,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:15:11,810 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:15:11,810 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821826, jitterRate=0.04500584304332733}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:15:11,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:15:11,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:15:11,812 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:15:11,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:15:11,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:15:11,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:15:11,812 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-01T18:15:11,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:15:11,813 DEBUG [RS:0;b8365d49b74c:33203 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b8365d49b74c:33203 2024-12-01T18:15:11,813 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:15:11,813 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-01T18:15:11,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T18:15:11,814 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1008): ClusterId : 89bfb79c-73b2-4f05-81fd-da18a0cbbdfd 2024-12-01T18:15:11,814 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:15:11,815 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T18:15:11,816 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T18:15:11,817 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:15:11,818 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:15:11,820 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:15:11,820 DEBUG [RS:0;b8365d49b74c:33203 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8ff6a37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:15:11,821 DEBUG [RS:0;b8365d49b74c:33203 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21bd12cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:15:11,821 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-01T18:15:11,821 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-01T18:15:11,821 DEBUG [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1090): About to register with Master. 
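The records above show the region server preparing to register with the master through the ZooKeeper quorum at 127.0.0.1:64640. A minimal sketch, assuming the mini-cluster from this log were still running, of how a client connection against that quorum is normally created with the standard 2.x client API (the quorum address and client port are taken from the log; everything else is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Quorum host and client port as logged above (quorum=127.0.0.1:64640).
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "64640");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Lists user-visible tables; hbase:meta itself is not included.
                System.out.println(java.util.Arrays.toString(admin.listTableNames()));
            }
        }
    }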
2024-12-01T18:15:11,821 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(3073): reportForDuty to master=b8365d49b74c,41935,1733076911495 with isa=b8365d49b74c/172.17.0.2:33203, startcode=1733076911564 2024-12-01T18:15:11,822 DEBUG [RS:0;b8365d49b74c:33203 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:15:11,825 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59907, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:15:11,825 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41935 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b8365d49b74c,33203,1733076911564 2024-12-01T18:15:11,825 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41935 {}] master.ServerManager(486): Registering regionserver=b8365d49b74c,33203,1733076911564 2024-12-01T18:15:11,827 DEBUG [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def 2024-12-01T18:15:11,827 DEBUG [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41701 2024-12-01T18:15:11,827 DEBUG [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-01T18:15:11,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:15:11,830 DEBUG [RS:0;b8365d49b74c:33203 {}] zookeeper.ZKUtil(111): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b8365d49b74c,33203,1733076911564 2024-12-01T18:15:11,830 WARN [RS:0;b8365d49b74c:33203 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
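The registration step above ends with the region server setting a watcher on its own ephemeral znode under /hbase/rs. A small sketch, under the assumption that the same ZooKeeper quorum (127.0.0.1:64640) were reachable, of listing those registration znodes directly with the plain ZooKeeper client; the znode path and connect string come from the log:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class RegionServerZNodeSketch {
        public static void main(String[] args) throws Exception {
            // Connect string from the log; a no-op watcher is supplied.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:64640", 30_000, event -> { });
            try {
                // Each live region server registers an ephemeral child under /hbase/rs,
                // e.g. b8365d49b74c,33203,1733076911564 in the log above.
                List<String> servers = zk.getChildren("/hbase/rs", false);
                servers.forEach(System.out::println);
            } finally {
                zk.close();
            }
        }
    }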
2024-12-01T18:15:11,830 INFO [RS:0;b8365d49b74c:33203 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:15:11,830 DEBUG [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564 2024-12-01T18:15:11,832 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b8365d49b74c,33203,1733076911564] 2024-12-01T18:15:11,839 DEBUG [RS:0;b8365d49b74c:33203 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-01T18:15:11,839 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:15:11,843 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:15:11,844 INFO [RS:0;b8365d49b74c:33203 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:15:11,844 INFO [RS:0;b8365d49b74c:33203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:11,847 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-01T18:15:11,848 INFO [RS:0;b8365d49b74c:33203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
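The MemStoreFlusher record above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. A tiny sketch of the arithmetic relating the two values as they appear in this log; the interpretation of the ratio as a configured low-water fraction is an assumption, only the two numbers come from the log:

    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            // Numbers copied from the MemStoreFlusher line above.
            double globalLimitMb = 880.0;
            double lowMarkMb = 836.0;
            // 836 / 880 = 0.95, consistent with a ~95% low-water mark of the global limit.
            System.out.printf("low mark ratio = %.3f, low mark = %.0f M%n",
                    lowMarkMb / globalLimitMb, globalLimitMb * (lowMarkMb / globalLimitMb));
        }
    }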
2024-12-01T18:15:11,848 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,848 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,848 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,848 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,848 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,848 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:15:11,849 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,849 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,849 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,849 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,849 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:11,849 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:15:11,849 DEBUG [RS:0;b8365d49b74c:33203 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:15:11,851 INFO [RS:0;b8365d49b74c:33203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:11,851 INFO [RS:0;b8365d49b74c:33203 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:11,851 INFO [RS:0;b8365d49b74c:33203 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:11,851 INFO [RS:0;b8365d49b74c:33203 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:11,851 INFO [RS:0;b8365d49b74c:33203 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,33203,1733076911564-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
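The chore lines above all follow the same "period=…, unit=…" scheduling shape. The following is not HBase's ChoreService, just a plain java.util.concurrent sketch of that period/unit pattern, with the 1000 ms period borrowed from the CompactionChecker chore and a stand-in task body:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChorePatternSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
            // Mirrors the "period=1000, unit=MILLISECONDS" shape of the CompactionChecker chore;
            // the printed message is a placeholder, not the real chore logic.
            pool.scheduleAtFixedRate(() -> System.out.println("chore tick"), 0, 1000, TimeUnit.MILLISECONDS);
            TimeUnit.SECONDS.sleep(3);
            pool.shutdownNow();
        }
    }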
2024-12-01T18:15:11,877 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:15:11,878 INFO [RS:0;b8365d49b74c:33203 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,33203,1733076911564-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:11,899 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.Replication(204): b8365d49b74c,33203,1733076911564 started 2024-12-01T18:15:11,899 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1767): Serving as b8365d49b74c,33203,1733076911564, RpcServer on b8365d49b74c/172.17.0.2:33203, sessionid=0x1004ecabe550001 2024-12-01T18:15:11,899 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:15:11,899 DEBUG [RS:0;b8365d49b74c:33203 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b8365d49b74c,33203,1733076911564 2024-12-01T18:15:11,899 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,33203,1733076911564' 2024-12-01T18:15:11,899 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:15:11,900 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:15:11,901 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:15:11,901 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:15:11,901 DEBUG [RS:0;b8365d49b74c:33203 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b8365d49b74c,33203,1733076911564 2024-12-01T18:15:11,901 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,33203,1733076911564' 2024-12-01T18:15:11,901 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:15:11,902 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:15:11,902 DEBUG [RS:0;b8365d49b74c:33203 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:15:11,902 INFO [RS:0;b8365d49b74c:33203 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:15:11,902 INFO [RS:0;b8365d49b74c:33203 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T18:15:11,966 WARN [b8365d49b74c:41935 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
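The quota managers above start in the disabled state. A hedged sketch of how quota support is typically switched on before cluster start; the hbase.quota.enabled key is an assumption on the editor's part and does not appear in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key: quota support (reported as "disabled" in the log above)
            // is normally enabled via this flag in the cluster configuration.
            conf.setBoolean("hbase.quota.enabled", true);
            System.out.println(conf.getBoolean("hbase.quota.enabled", false));
        }
    }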
2024-12-01T18:15:12,005 INFO [RS:0;b8365d49b74c:33203 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C33203%2C1733076911564, suffix=, logDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564, archiveDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/oldWALs, maxLogs=32 2024-12-01T18:15:12,007 INFO [RS:0;b8365d49b74c:33203 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C33203%2C1733076911564.1733076912007 2024-12-01T18:15:12,019 INFO [RS:0;b8365d49b74c:33203 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 2024-12-01T18:15:12,019 DEBUG [RS:0;b8365d49b74c:33203 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42105:42105),(127.0.0.1/127.0.0.1:43481:43481)] 2024-12-01T18:15:12,217 DEBUG [b8365d49b74c:41935 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-01T18:15:12,217 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b8365d49b74c,33203,1733076911564 2024-12-01T18:15:12,218 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,33203,1733076911564, state=OPENING 2024-12-01T18:15:12,220 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T18:15:12,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:12,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:12,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b8365d49b74c,33203,1733076911564}] 2024-12-01T18:15:12,223 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:15:12,223 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:15:12,376 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b8365d49b74c,33203,1733076911564 2024-12-01T18:15:12,376 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T18:15:12,379 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44338, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T18:15:12,383 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-01T18:15:12,383 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:15:12,386 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C33203%2C1733076911564.meta, suffix=.meta, logDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564, archiveDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/oldWALs, maxLogs=32 2024-12-01T18:15:12,389 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta 2024-12-01T18:15:12,406 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta 2024-12-01T18:15:12,406 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42105:42105),(127.0.0.1/127.0.0.1:43481:43481)] 2024-12-01T18:15:12,406 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:15:12,406 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T18:15:12,407 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T18:15:12,407 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
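The records above show MultiRowMutationEndpoint being loaded from the hbase:meta table descriptor. A minimal sketch of how a coprocessor class is attached to a table descriptor with the 2.x client API; the "example" table and "info" family here are hypothetical, only the coprocessor class name is taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorDescriptorSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical table; in the log above only hbase:meta carries this endpoint.
            TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("example"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                    .build();
            System.out.println(td);
        }
    }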
2024-12-01T18:15:12,407 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T18:15:12,407 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:15:12,407 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-01T18:15:12,407 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-01T18:15:12,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:15:12,410 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:15:12,410 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:12,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:15:12,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:15:12,412 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:15:12,412 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:12,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:15:12,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:15:12,414 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:15:12,414 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:12,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:15:12,415 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/hbase/meta/1588230740 2024-12-01T18:15:12,417 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/hbase/meta/1588230740 2024-12-01T18:15:12,419 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
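The CompactionConfiguration dump and the FlushLargeStoresPolicy message above are likewise driven by site configuration. A minimal sketch of the knobs behind the printed numbers (minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2, and the hbase.hregion.percolumnfamilyflush.size.lower.bound key the policy reports as unset); the key names are the usual HBase 2.x ones, the values are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionAndFlushConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files considered per minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // A file is a candidate if it is smaller than ratio * sum(smaller files).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // The key named in the log: per-column-family flush lower bound. When unset,
    // HBase falls back to memstore flush size / number of families (16.0 M above).
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
    System.out.println("compaction ratio = " + conf.get("hbase.hstore.compaction.ratio"));
  }
}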
2024-12-01T18:15:12,421 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:15:12,422 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=701473, jitterRate=-0.10803164541721344}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:15:12,423 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:15:12,425 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733076912376 2024-12-01T18:15:12,427 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T18:15:12,427 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-01T18:15:12,428 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,33203,1733076911564 2024-12-01T18:15:12,429 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,33203,1733076911564, state=OPEN 2024-12-01T18:15:12,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:15:12,434 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:15:12,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:15:12,435 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:15:12,438 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T18:15:12,439 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=b8365d49b74c,33203,1733076911564 in 212 msec 2024-12-01T18:15:12,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T18:15:12,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 625 msec 2024-12-01T18:15:12,444 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 675 msec 2024-12-01T18:15:12,444 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733076912444, completionTime=-1 2024-12-01T18:15:12,444 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-01T18:15:12,444 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-01T18:15:12,445 DEBUG [hconnection-0x25e99303-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:15:12,447 INFO [RS-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44348, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:15:12,448 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-01T18:15:12,448 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733076972448 2024-12-01T18:15:12,449 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733077032448 2024-12-01T18:15:12,449 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 4 msec 2024-12-01T18:15:12,455 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41935,1733076911495-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:12,455 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41935,1733076911495-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:12,455 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41935,1733076911495-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:12,455 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b8365d49b74c:41935, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:12,456 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:12,456 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
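The 1588230740 (hbase:meta) open a few entries above reports SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy/ConstantSizeRegionSplitPolicy. A hedged sketch of how a split policy is normally selected, either cluster-wide or per table; the table name "demo" is hypothetical and nothing here is taken from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SplitPolicySketch {
  public static void main(String[] args) {
    // Cluster-wide default split policy.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");

    // Or per table, through the table descriptor (hypothetical table).
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setRegionSplitPolicyClassName(
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy")
        .build();
    System.out.println(td.getRegionSplitPolicyClassName());
  }
}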
2024-12-01T18:15:12,456 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:15:12,457 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-01T18:15:12,459 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:15:12,459 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:12,460 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:15:12,461 DEBUG [master/b8365d49b74c:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-01T18:15:12,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35165 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:15:12,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41477 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:15:12,476 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d90a57a1002207e8dc1819546803b835, NAME => 'hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def 2024-12-01T18:15:12,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35165 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:15:12,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41477 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:15:12,486 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:15:12,486 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing d90a57a1002207e8dc1819546803b835, disabling compactions & flushes 2024-12-01T18:15:12,486 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:12,487 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:12,487 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. after waiting 0 ms 2024-12-01T18:15:12,487 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:12,487 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:12,487 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for d90a57a1002207e8dc1819546803b835: 2024-12-01T18:15:12,488 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:15:12,489 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733076912488"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733076912488"}]},"ts":"1733076912488"} 2024-12-01T18:15:12,491 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-01T18:15:12,493 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:15:12,493 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076912493"}]},"ts":"1733076912493"} 2024-12-01T18:15:12,495 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-01T18:15:12,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d90a57a1002207e8dc1819546803b835, ASSIGN}] 2024-12-01T18:15:12,501 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d90a57a1002207e8dc1819546803b835, ASSIGN 2024-12-01T18:15:12,502 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=d90a57a1002207e8dc1819546803b835, ASSIGN; state=OFFLINE, location=b8365d49b74c,33203,1733076911564; forceNewPlan=false, retain=false 2024-12-01T18:15:12,653 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d90a57a1002207e8dc1819546803b835, regionState=OPENING, regionLocation=b8365d49b74c,33203,1733076911564 2024-12-01T18:15:12,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure d90a57a1002207e8dc1819546803b835, server=b8365d49b74c,33203,1733076911564}] 2024-12-01T18:15:12,809 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b8365d49b74c,33203,1733076911564 2024-12-01T18:15:12,813 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:12,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => d90a57a1002207e8dc1819546803b835, NAME => 'hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:15:12,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace d90a57a1002207e8dc1819546803b835 2024-12-01T18:15:12,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:15:12,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for d90a57a1002207e8dc1819546803b835 2024-12-01T18:15:12,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for d90a57a1002207e8dc1819546803b835 2024-12-01T18:15:12,816 INFO [StoreOpener-d90a57a1002207e8dc1819546803b835-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d90a57a1002207e8dc1819546803b835 2024-12-01T18:15:12,817 INFO [StoreOpener-d90a57a1002207e8dc1819546803b835-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d90a57a1002207e8dc1819546803b835 columnFamilyName info 2024-12-01T18:15:12,818 DEBUG [StoreOpener-d90a57a1002207e8dc1819546803b835-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:12,818 INFO [StoreOpener-d90a57a1002207e8dc1819546803b835-1 {}] regionserver.HStore(327): Store=d90a57a1002207e8dc1819546803b835/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:15:12,819 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/hbase/namespace/d90a57a1002207e8dc1819546803b835 2024-12-01T18:15:12,819 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/hbase/namespace/d90a57a1002207e8dc1819546803b835 2024-12-01T18:15:12,822 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for d90a57a1002207e8dc1819546803b835 2024-12-01T18:15:12,824 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/hbase/namespace/d90a57a1002207e8dc1819546803b835/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:15:12,825 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened d90a57a1002207e8dc1819546803b835; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818962, jitterRate=0.041364505887031555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:15:12,826 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for d90a57a1002207e8dc1819546803b835: 2024-12-01T18:15:12,827 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835., pid=6, masterSystemTime=1733076912809 2024-12-01T18:15:12,829 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:12,829 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 
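Once hbase:namespace reports "Opened" above, the assignment becomes visible to any client through the locator API. A minimal sketch, assuming a cluster reachable from HBaseConfiguration.create(); the code is illustrative and not part of this test:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:namespace"))) {
      // Ask which server hosts the row; for a single-region table any row will do.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true /* reload */);
      System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
    }
  }
}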
2024-12-01T18:15:12,830 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d90a57a1002207e8dc1819546803b835, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,33203,1733076911564 2024-12-01T18:15:12,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T18:15:12,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure d90a57a1002207e8dc1819546803b835, server=b8365d49b74c,33203,1733076911564 in 176 msec 2024-12-01T18:15:12,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T18:15:12,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=d90a57a1002207e8dc1819546803b835, ASSIGN in 335 msec 2024-12-01T18:15:12,839 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:15:12,839 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076912839"}]},"ts":"1733076912839"} 2024-12-01T18:15:12,841 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-01T18:15:12,844 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:15:12,846 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 388 msec 2024-12-01T18:15:12,859 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-01T18:15:12,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:15:12,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:12,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:12,866 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-01T18:15:12,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:15:12,926 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 59 msec 2024-12-01T18:15:12,929 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-01T18:15:12,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:15:12,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 13 msec 2024-12-01T18:15:12,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-01T18:15:12,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-01T18:15:12,959 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.350sec 2024-12-01T18:15:12,960 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T18:15:12,960 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T18:15:12,960 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T18:15:12,960 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-01T18:15:12,960 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T18:15:12,960 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41935,1733076911495-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:15:12,960 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41935,1733076911495-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T18:15:12,962 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-01T18:15:12,962 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T18:15:12,962 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41935,1733076911495-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
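pid=7 and pid=8 above are CreateNamespaceProcedure runs for the built-in "default" and "hbase" namespaces. For reference, the client-side equivalent for a user namespace looks roughly like the sketch below; the namespace name "demo_ns" is made up for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Triggers a CreateNamespaceProcedure on the master, like pid=7/8 above.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}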
2024-12-01T18:15:12,996 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3ab557ac to 127.0.0.1:64640 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62cd5620 2024-12-01T18:15:12,999 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15f71bfd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:15:13,001 DEBUG [hconnection-0x7faa2a92-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:15:13,003 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:15:13,006 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b8365d49b74c,41935,1733076911495 2024-12-01T18:15:13,007 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:15:13,009 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-01T18:15:13,026 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:15:13,026 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:15:13,026 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:15:13,026 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:15:13,026 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:15:13,026 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:15:13,027 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:15:13,027 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:15:13,027 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41243 2024-12-01T18:15:13,028 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:15:13,028 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:15:13,029 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:15:13,031 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:15:13,035 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41243 connecting to ZooKeeper ensemble=127.0.0.1:64640 2024-12-01T18:15:13,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412430x0, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:15:13,038 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41243-0x1004ecabe550003, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:15:13,038 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41243-0x1004ecabe550003 connected 2024-12-01T18:15:13,039 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41243-0x1004ecabe550003, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-01T18:15:13,040 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41243-0x1004ecabe550003, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:15:13,043 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41243 2024-12-01T18:15:13,044 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41243 2024-12-01T18:15:13,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41243 2024-12-01T18:15:13,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41243 2024-12-01T18:15:13,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41243 2024-12-01T18:15:13,052 DEBUG [pool-282-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-01T18:15:13,065 DEBUG [RS:1;b8365d49b74c:41243 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b8365d49b74c:41243 2024-12-01T18:15:13,066 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(1008): ClusterId : 89bfb79c-73b2-4f05-81fd-da18a0cbbdfd 2024-12-01T18:15:13,067 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:15:13,069 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:15:13,069 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:15:13,072 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:15:13,073 DEBUG [RS:1;b8365d49b74c:41243 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79e5eb90, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:15:13,073 DEBUG [RS:1;b8365d49b74c:41243 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a97ffae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:15:13,074 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-01T18:15:13,074 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-01T18:15:13,074 DEBUG [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-01T18:15:13,074 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(3073): reportForDuty to master=b8365d49b74c,41935,1733076911495 with isa=b8365d49b74c/172.17.0.2:41243, startcode=1733076913026 2024-12-01T18:15:13,074 DEBUG [RS:1;b8365d49b74c:41243 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:15:13,076 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35681, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:15:13,077 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41935 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b8365d49b74c,41243,1733076913026 2024-12-01T18:15:13,077 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41935 {}] master.ServerManager(486): Registering regionserver=b8365d49b74c,41243,1733076913026 2024-12-01T18:15:13,079 DEBUG [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def 2024-12-01T18:15:13,079 DEBUG [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41701 2024-12-01T18:15:13,079 DEBUG [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-01T18:15:13,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:15:13,088 DEBUG [RS:1;b8365d49b74c:41243 {}] zookeeper.ZKUtil(111): regionserver:41243-0x1004ecabe550003, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b8365d49b74c,41243,1733076913026 2024-12-01T18:15:13,088 WARN [RS:1;b8365d49b74c:41243 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T18:15:13,088 INFO [RS:1;b8365d49b74c:41243 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:15:13,088 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b8365d49b74c,41243,1733076913026] 2024-12-01T18:15:13,088 DEBUG [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,41243,1733076913026 2024-12-01T18:15:13,099 DEBUG [RS:1;b8365d49b74c:41243 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-01T18:15:13,100 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:15:13,103 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:15:13,104 INFO [RS:1;b8365d49b74c:41243 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:15:13,104 INFO [RS:1;b8365d49b74c:41243 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:13,108 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-01T18:15:13,110 INFO [RS:1;b8365d49b74c:41243 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
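The MemStoreFlusher line above derives globalMemStoreLimit (880 M) from the region-server heap and the global memstore fraction, and the earlier BlockCacheFactory line sizes the block cache the same way. A hedged sketch of the two fractions involved; the 0.4 values are illustrative defaults, not necessarily what this test runs with.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemorySizingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the RS heap reserved for all memstores (the 880 M global limit above).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Fraction of the RS heap reserved for the block cache (the 880 MB BlockCache above).
    conf.setFloat("hfile.block.cache.size", 0.4f);
    long heap = Runtime.getRuntime().maxMemory();
    System.out.printf("approx global memstore limit = %d bytes%n",
        (long) (heap * conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f)));
  }
}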
2024-12-01T18:15:13,110 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:13,110 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:13,111 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:13,111 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:13,111 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:13,111 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:15:13,111 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:13,111 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:13,111 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:13,111 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:13,111 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:15:13,111 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:15:13,111 DEBUG [RS:1;b8365d49b74c:41243 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:15:13,111 INFO [RS:1;b8365d49b74c:41243 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:13,111 INFO [RS:1;b8365d49b74c:41243 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:13,111 INFO [RS:1;b8365d49b74c:41243 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:13,112 INFO [RS:1;b8365d49b74c:41243 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:13,112 INFO [RS:1;b8365d49b74c:41243 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41243,1733076913026-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-01T18:15:13,128 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:15:13,129 INFO [RS:1;b8365d49b74c:41243 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41243,1733076913026-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:15:13,144 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.Replication(204): b8365d49b74c,41243,1733076913026 started 2024-12-01T18:15:13,144 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(1767): Serving as b8365d49b74c,41243,1733076913026, RpcServer on b8365d49b74c/172.17.0.2:41243, sessionid=0x1004ecabe550003 2024-12-01T18:15:13,145 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:15:13,145 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3355): Started new server=Thread[RS:1;b8365d49b74c:41243,5,FailOnTimeoutGroup] 2024-12-01T18:15:13,145 DEBUG [RS:1;b8365d49b74c:41243 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b8365d49b74c,41243,1733076913026 2024-12-01T18:15:13,145 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,41243,1733076913026' 2024-12-01T18:15:13,145 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:15:13,145 INFO [Time-limited test {}] wal.TestLogRolling(191): Replication=2 2024-12-01T18:15:13,146 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:15:13,146 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:15:13,146 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:15:13,146 DEBUG [RS:1;b8365d49b74c:41243 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b8365d49b74c,41243,1733076913026 2024-12-01T18:15:13,146 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,41243,1733076913026' 2024-12-01T18:15:13,146 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T18:15:13,146 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:15:13,147 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:15:13,147 DEBUG [RS:1;b8365d49b74c:41243 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:15:13,147 INFO [RS:1;b8365d49b74c:41243 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:15:13,147 INFO [RS:1;b8365d49b74c:41243 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
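RS:1 above is a second region server the test adds to the minicluster ("Started new server=Thread[RS:1;...]"). A sketch of how that is typically done with the HBaseTestingUtility test API (from the hbase-server test artifact); the exact call sequence is an assumption, not this test's actual source.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;

public class ExtraRegionServerSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(1);                      // one master, one RS to begin with
    MiniHBaseCluster cluster = util.getHBaseCluster();
    cluster.startRegionServer();                   // brings up RS:1, as in the log above
    System.out.println("live region servers = "
        + cluster.getLiveRegionServerThreads().size());
    util.shutdownMiniCluster();
  }
}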
2024-12-01T18:15:13,149 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T18:15:13,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41935 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-01T18:15:13,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41935 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-01T18:15:13,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41935 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:15:13,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41935 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-01T18:15:13,154 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:15:13,154 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:13,154 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41935 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 9 2024-12-01T18:15:13,155 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:15:13,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41935 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-01T18:15:13,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41477 is added to blk_1073741837_1013 (size=393) 2024-12-01T18:15:13,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35165 is added to blk_1073741837_1013 (size=393) 2024-12-01T18:15:13,176 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 40874ed7bf99d8ab52ff9f7b7db251a2, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def 2024-12-01T18:15:13,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35165 is added to blk_1073741838_1014 (size=76) 2024-12-01T18:15:13,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41477 is added to blk_1073741838_1014 (size=76) 2024-12-01T18:15:13,186 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:15:13,186 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1681): Closing 40874ed7bf99d8ab52ff9f7b7db251a2, disabling compactions & flushes 2024-12-01T18:15:13,186 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 2024-12-01T18:15:13,186 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 2024-12-01T18:15:13,186 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. after waiting 0 ms 2024-12-01T18:15:13,186 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 2024-12-01T18:15:13,186 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 2024-12-01T18:15:13,186 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1635): Region close journal for 40874ed7bf99d8ab52ff9f7b7db251a2: 2024-12-01T18:15:13,188 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:15:13,188 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733076913188"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733076913188"}]},"ts":"1733076913188"} 2024-12-01T18:15:13,191 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
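The two TableDescriptorChecker warnings above fire because TestLogRolling-testLogRollOnDatanodeDeath is created with a tiny max file size (786432) and memstore flush size (8192) so that flushes and splits happen quickly; the test may set these through the hbase.hregion.max.filesize / hbase.hregion.memstore.flush.size config keys rather than the descriptor. A hedged reconstruction using the generic client API (not the test's source):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallLimitsTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setMaxFileSize(786432L)        // produces the MAX_FILESIZE warning above
          .setMemStoreFlushSize(8192L)    // produces the MEMSTORE_FLUSHSIZE warning above
          .build());
      // The master only warns here; hbase.table.sanity.checks=false would disable the check.
    }
  }
}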
2024-12-01T18:15:13,192 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:15:13,193 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076913193"}]},"ts":"1733076913193"} 2024-12-01T18:15:13,195 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-01T18:15:13,204 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=40874ed7bf99d8ab52ff9f7b7db251a2, ASSIGN}] 2024-12-01T18:15:13,206 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=40874ed7bf99d8ab52ff9f7b7db251a2, ASSIGN 2024-12-01T18:15:13,207 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=40874ed7bf99d8ab52ff9f7b7db251a2, ASSIGN; state=OFFLINE, location=b8365d49b74c,33203,1733076911564; forceNewPlan=false, retain=false 2024-12-01T18:15:13,250 INFO [RS:1;b8365d49b74c:41243 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C41243%2C1733076913026, suffix=, logDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,41243,1733076913026, archiveDir=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/oldWALs, maxLogs=32 2024-12-01T18:15:13,252 INFO [RS:1;b8365d49b74c:41243 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41243%2C1733076913026.1733076913252 2024-12-01T18:15:13,260 INFO [RS:1;b8365d49b74c:41243 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,41243,1733076913026/b8365d49b74c%2C41243%2C1733076913026.1733076913252 2024-12-01T18:15:13,260 DEBUG [RS:1;b8365d49b74c:41243 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43481:43481),(127.0.0.1/127.0.0.1:42105:42105)] 2024-12-01T18:15:13,359 INFO [b8365d49b74c:41935 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
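RS:1 above has just created its first WAL file. Since this test exercises log rolling, it is worth noting that a roll can also be requested explicitly; the minimal sketch below uses the HBase 2.x Admin API and takes the server list from cluster metrics rather than hard-coding one (everything here is an assumption about usage, not the test's code).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Roll the WAL on every live region server; each RS then logs a new
      // "New WAL ..." line like the AbstractFSWAL(841) entries above.
      for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        admin.rollWALWriter(sn);
      }
    }
  }
}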
2024-12-01T18:15:13,359 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=40874ed7bf99d8ab52ff9f7b7db251a2, regionState=OPENING, regionLocation=b8365d49b74c,33203,1733076911564 2024-12-01T18:15:13,362 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 40874ed7bf99d8ab52ff9f7b7db251a2, server=b8365d49b74c,33203,1733076911564}] 2024-12-01T18:15:13,516 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b8365d49b74c,33203,1733076911564 2024-12-01T18:15:13,521 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 2024-12-01T18:15:13,521 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 40874ed7bf99d8ab52ff9f7b7db251a2, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:15:13,522 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 40874ed7bf99d8ab52ff9f7b7db251a2 2024-12-01T18:15:13,522 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:15:13,522 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 40874ed7bf99d8ab52ff9f7b7db251a2 2024-12-01T18:15:13,523 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 40874ed7bf99d8ab52ff9f7b7db251a2 2024-12-01T18:15:13,524 INFO [StoreOpener-40874ed7bf99d8ab52ff9f7b7db251a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 40874ed7bf99d8ab52ff9f7b7db251a2 2024-12-01T18:15:13,526 INFO [StoreOpener-40874ed7bf99d8ab52ff9f7b7db251a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 40874ed7bf99d8ab52ff9f7b7db251a2 columnFamilyName info 2024-12-01T18:15:13,526 DEBUG [StoreOpener-40874ed7bf99d8ab52ff9f7b7db251a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:15:13,526 INFO [StoreOpener-40874ed7bf99d8ab52ff9f7b7db251a2-1 {}] regionserver.HStore(327): Store=40874ed7bf99d8ab52ff9f7b7db251a2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:15:13,527 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2 2024-12-01T18:15:13,528 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2 2024-12-01T18:15:13,530 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 40874ed7bf99d8ab52ff9f7b7db251a2 2024-12-01T18:15:13,532 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:15:13,533 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 40874ed7bf99d8ab52ff9f7b7db251a2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830671, jitterRate=0.056253477931022644}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:15:13,534 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 40874ed7bf99d8ab52ff9f7b7db251a2: 2024-12-01T18:15:13,535 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2., pid=11, masterSystemTime=1733076913515 2024-12-01T18:15:13,537 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 2024-12-01T18:15:13,537 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 
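The open journal above also records the region's split and flush policies: a SteppingSplitPolicy layered over IncreasingToUpperBoundRegionSplitPolicy with desiredMaxFileSize=830671, and FlushLargeStoresPolicy with the lower bound disabled. A hedged sketch of the settings that typically drive those values; the policy class name matches the log, while the base file size below is purely illustrative, since the logged 830671 already has the jitterRate folded in.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SplitPolicySettingsSketch {
  public static Configuration splitPolicySettings() {
    Configuration conf = HBaseConfiguration.create();
    // Split policy class seen in the log; SteppingSplitPolicy extends
    // IncreasingToUpperBoundRegionSplitPolicy, hence the nested toString above.
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
    // Hypothetical small maximum store file size, the kind of value a test would use;
    // the logged desiredMaxFileSize is this setting with the random jitter applied.
    conf.setLong("hbase.hregion.max.filesize", 768L * 1024);
    return conf;
  }
}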
2024-12-01T18:15:13,538 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=40874ed7bf99d8ab52ff9f7b7db251a2, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,33203,1733076911564 2024-12-01T18:15:13,539 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41935 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=b8365d49b74c,33203,1733076911564, table=TestLogRolling-testLogRollOnDatanodeDeath, region=40874ed7bf99d8ab52ff9f7b7db251a2. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-01T18:15:13,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-01T18:15:13,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 40874ed7bf99d8ab52ff9f7b7db251a2, server=b8365d49b74c,33203,1733076911564 in 179 msec 2024-12-01T18:15:13,546 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-01T18:15:13,546 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=40874ed7bf99d8ab52ff9f7b7db251a2, ASSIGN in 339 msec 2024-12-01T18:15:13,547 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:15:13,547 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076913547"}]},"ts":"1733076913547"} 2024-12-01T18:15:13,549 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-01T18:15:13,552 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:15:13,554 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 401 msec 2024-12-01T18:15:14,097 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:15:14,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:15:14,620 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T18:15:14,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:15:14,647 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:15:17,840 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-01T18:15:17,841 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-01T18:15:17,841 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-01T18:15:18,602 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-01T18:15:18,602 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-01T18:15:18,603 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-01T18:15:23,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41935 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-01T18:15:23,157 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath, procId: 9 completed 2024-12-01T18:15:23,160 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-01T18:15:23,161 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 2024-12-01T18:15:23,176 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:15:23,180 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:15:23,184 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:15:23,184 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:15:23,184 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:15:23,185 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@637c6f6b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:15:23,185 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a8a53d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:15:23,306 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@19594abf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/java.io.tmpdir/jetty-localhost-41399-hadoop-hdfs-3_4_1-tests_jar-_-any-12295617961948011942/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:15:23,306 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78d96bda{HTTP/1.1, (http/1.1)}{localhost:41399} 2024-12-01T18:15:23,306 INFO [Time-limited test {}] server.Server(415): Started @130600ms 2024-12-01T18:15:23,308 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:15:23,343 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:15:23,348 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:15:23,349 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:15:23,349 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:15:23,349 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T18:15:23,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47b90ec5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:15:23,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b6caab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:15:23,408 WARN [Thread-631 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data5/current/BP-1689502036-172.17.0.2-1733076910377/current, will proceed with Du for space computation calculation, 2024-12-01T18:15:23,408 WARN [Thread-632 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data6/current/BP-1689502036-172.17.0.2-1733076910377/current, will proceed with Du for space computation calculation, 2024-12-01T18:15:23,434 WARN [Thread-611 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:15:23,437 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd5995bb92aafd4d1 with lease ID 0x239788ce6d120951: Processing first storage report for DS-5456da90-636c-413b-a28c-04cde8ab04db from datanode DatanodeRegistration(127.0.0.1:36043, datanodeUuid=bd9f93a6-d146-492a-a588-b4d49473edc0, infoPort=46607, infoSecurePort=0, ipcPort=45007, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377) 2024-12-01T18:15:23,437 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd5995bb92aafd4d1 with lease ID 0x239788ce6d120951: from storage DS-5456da90-636c-413b-a28c-04cde8ab04db node DatanodeRegistration(127.0.0.1:36043, datanodeUuid=bd9f93a6-d146-492a-a588-b4d49473edc0, infoPort=46607, infoSecurePort=0, ipcPort=45007, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:15:23,437 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd5995bb92aafd4d1 with lease ID 0x239788ce6d120951: Processing first storage report for DS-168bade8-b9fb-4a5e-b11e-f2e1f37aed18 from datanode DatanodeRegistration(127.0.0.1:36043, datanodeUuid=bd9f93a6-d146-492a-a588-b4d49473edc0, infoPort=46607, infoSecurePort=0, ipcPort=45007, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377) 2024-12-01T18:15:23,437 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd5995bb92aafd4d1 with lease ID 0x239788ce6d120951: from storage DS-168bade8-b9fb-4a5e-b11e-f2e1f37aed18 node DatanodeRegistration(127.0.0.1:36043, datanodeUuid=bd9f93a6-d146-492a-a588-b4d49473edc0, infoPort=46607, infoSecurePort=0, ipcPort=45007, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:15:23,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18097a5f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/java.io.tmpdir/jetty-localhost-33423-hadoop-hdfs-3_4_1-tests_jar-_-any-9620773765098151977/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:15:23,470 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@52e765a8{HTTP/1.1, (http/1.1)}{localhost:33423} 2024-12-01T18:15:23,470 INFO [Time-limited test {}] server.Server(415): Started @130764ms 2024-12-01T18:15:23,472 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:15:23,504 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:15:23,508 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:15:23,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:15:23,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:15:23,508 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:15:23,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@518ab61c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:15:23,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fc1710a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:15:23,578 WARN [Thread-666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data7/current/BP-1689502036-172.17.0.2-1733076910377/current, will proceed with Du for space computation calculation, 2024-12-01T18:15:23,578 WARN [Thread-667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data8/current/BP-1689502036-172.17.0.2-1733076910377/current, will proceed with Du for space computation calculation, 2024-12-01T18:15:23,603 WARN [Thread-646 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:15:23,606 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5385bcffd0e8242d with lease ID 0x239788ce6d120952: Processing first storage report for DS-68c6acc7-3a22-43a1-8e44-c276a1750b02 from datanode DatanodeRegistration(127.0.0.1:44007, datanodeUuid=dcb8a0c1-6df1-4932-894d-369da74a0cbb, infoPort=41395, infoSecurePort=0, ipcPort=45513, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377) 2024-12-01T18:15:23,606 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5385bcffd0e8242d with lease ID 0x239788ce6d120952: from storage DS-68c6acc7-3a22-43a1-8e44-c276a1750b02 node DatanodeRegistration(127.0.0.1:44007, datanodeUuid=dcb8a0c1-6df1-4932-894d-369da74a0cbb, infoPort=41395, infoSecurePort=0, ipcPort=45513, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:15:23,606 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5385bcffd0e8242d with lease ID 0x239788ce6d120952: Processing first storage report for DS-ea2a30f3-0a3c-4b95-8731-9d887994f8b3 from datanode DatanodeRegistration(127.0.0.1:44007, datanodeUuid=dcb8a0c1-6df1-4932-894d-369da74a0cbb, infoPort=41395, infoSecurePort=0, ipcPort=45513, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377) 2024-12-01T18:15:23,606 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5385bcffd0e8242d with lease ID 0x239788ce6d120952: from storage DS-ea2a30f3-0a3c-4b95-8731-9d887994f8b3 node DatanodeRegistration(127.0.0.1:44007, datanodeUuid=dcb8a0c1-6df1-4932-894d-369da74a0cbb, infoPort=41395, infoSecurePort=0, ipcPort=45513, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:15:23,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7614f371{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/java.io.tmpdir/jetty-localhost-37697-hadoop-hdfs-3_4_1-tests_jar-_-any-10286502146688321117/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:15:23,630 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@50808759{HTTP/1.1, (http/1.1)}{localhost:37697} 2024-12-01T18:15:23,630 INFO [Time-limited test {}] server.Server(415): Started @130924ms 2024-12-01T18:15:23,632 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
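Between the table creation above and the pipeline failures below, the test grows the mini DFS cluster: three more datanodes come up (the data5..data10 block-pool directories and the new Jetty datanode contexts) before one of the original pipeline members is shut down. A hedged sketch of that step, assuming the standard HBaseTestingUtility and MiniDFSCluster test helpers named in the log; the helper method and its arguments are illustrative, not the test's actual code.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
  // Hypothetical helper mirroring what the log suggests happens around this point.
  static void addThenKillDatanode(HBaseTestingUtility testUtil) throws IOException {
    MiniDFSCluster dfs = testUtil.getDFSCluster();
    // Start one extra datanode so writes have somewhere to re-pipeline
    // once a node disappears (the log shows several coming up, two data dirs each).
    dfs.startDataNodes(testUtil.getConfiguration(), 1, true, null, null);
    // Stop one of the original datanodes; this is what produces the
    // "Error Recovery ... datanode ... is bad" and log-roll messages that follow.
    // The returned properties would let the test restart the node later.
    MiniDFSCluster.DataNodeProperties stopped = dfs.stopDataNode(0);
  }
}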
2024-12-01T18:15:23,739 WARN [Thread-692 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data9/current/BP-1689502036-172.17.0.2-1733076910377/current, will proceed with Du for space computation calculation, 2024-12-01T18:15:23,740 WARN [Thread-693 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data10/current/BP-1689502036-172.17.0.2-1733076910377/current, will proceed with Du for space computation calculation, 2024-12-01T18:15:23,757 WARN [Thread-681 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:15:23,760 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe32d2d01587c3490 with lease ID 0x239788ce6d120953: Processing first storage report for DS-cb3c7192-9dc0-4870-8b23-020f575c98ea from datanode DatanodeRegistration(127.0.0.1:36645, datanodeUuid=84157bd8-8204-424e-9882-d58ab490bedd, infoPort=38325, infoSecurePort=0, ipcPort=43523, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377) 2024-12-01T18:15:23,760 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe32d2d01587c3490 with lease ID 0x239788ce6d120953: from storage DS-cb3c7192-9dc0-4870-8b23-020f575c98ea node DatanodeRegistration(127.0.0.1:36645, datanodeUuid=84157bd8-8204-424e-9882-d58ab490bedd, infoPort=38325, infoSecurePort=0, ipcPort=43523, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:15:23,760 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe32d2d01587c3490 with lease ID 0x239788ce6d120953: Processing first storage report for DS-504b536e-9f75-40c9-b341-aa8b0ba8d52f from datanode DatanodeRegistration(127.0.0.1:36645, datanodeUuid=84157bd8-8204-424e-9882-d58ab490bedd, infoPort=38325, infoSecurePort=0, ipcPort=43523, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377) 2024-12-01T18:15:23,760 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe32d2d01587c3490 with lease ID 0x239788ce6d120953: from storage DS-504b536e-9f75-40c9-b341-aa8b0ba8d52f node DatanodeRegistration(127.0.0.1:36645, datanodeUuid=84157bd8-8204-424e-9882-d58ab490bedd, infoPort=38325, infoSecurePort=0, ipcPort=43523, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:15:23,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3666a50b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:15:23,860 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3a9cfc80{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:15:23,860 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:15:23,860 INFO 
[Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e043b3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:15:23,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bddbe80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,STOPPED} 2024-12-01T18:15:23,857 WARN [ResponseProcessor for block BP-1689502036-172.17.0.2-1733076910377:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1689502036-172.17.0.2-1733076910377:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1689502036-172.17.0.2-1733076910377:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:23,856 WARN [ResponseProcessor for block BP-1689502036-172.17.0.2-1733076910377:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1689502036-172.17.0.2-1733076910377:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:23,857 WARN [ResponseProcessor for block BP-1689502036-172.17.0.2-1733076910377:blk_1073741839_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1689502036-172.17.0.2-1733076910377:blk_1073741839_1015 java.io.IOException: Bad response ERROR for BP-1689502036-172.17.0.2-1733076910377:blk_1073741839_1015 from datanode DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:23,862 WARN [DataStreamer for file /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495/b8365d49b74c%2C41935%2C1733076911495.1733076911684 block BP-1689502036-172.17.0.2-1733076910377:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK], DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]) is bad. 
2024-12-01T18:15:23,862 WARN [DataStreamer for file /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,41243,1733076913026/b8365d49b74c%2C41243%2C1733076913026.1733076913252 block BP-1689502036-172.17.0.2-1733076910377:blk_1073741839_1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741839_1015 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK], DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]) is bad. 2024-12-01T18:15:23,856 WARN [ResponseProcessor for block BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:23,863 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:15:23,863 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-01T18:15:23,863 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1689502036-172.17.0.2-1733076910377 (Datanode Uuid bd09ba3b-250a-46d3-98a5-dc69244ef5d7) service to localhost/127.0.0.1:41701 2024-12-01T18:15:23,863 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:15:23,864 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data3/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:15:23,863 WARN [DataStreamer for file /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 block BP-1689502036-172.17.0.2-1733076910377:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK], DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]) is bad. 
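At this point one datanode has been stopped and the DFS client's ResponseProcessor and DataStreamer threads begin pipeline error recovery, marking 127.0.0.1:41477 as bad for the master and region-server WAL blocks. Whether the client then tries to replace the bad datanode is governed by a few HDFS client settings; a hedged sketch follows, assuming current Hadoop 3.x key names, with illustrative values rather than anything read from this run.

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoverySettingsSketch {
  public static Configuration pipelineRecoverySettings() {
    Configuration conf = new Configuration();
    // Allow the client to swap a replacement datanode into a failed write pipeline.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT replaces only under certain conditions (e.g. pipelines with enough replicas);
    // ALWAYS and NEVER are the other accepted values.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // If finding a replacement fails, keep writing with the surviving datanodes
    // instead of failing the stream outright.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}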
2024-12-01T18:15:23,864 WARN [DataStreamer for file /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta block BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK], DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]) is bad. 2024-12-01T18:15:23,865 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:15:23,864 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data4/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:15:23,863 WARN [PacketResponder: BP-1689502036-172.17.0.2-1733076910377:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41477] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:23,866 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:35986 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35165:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35986 dst: /127.0.0.1:35165 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:23,865 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1676445854_22 at /127.0.0.1:33202 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33202 dst: /127.0.0.1:41477 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49691 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:23,865 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:33234 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33234 dst: /127.0.0.1:41477 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:23,862 WARN [PacketResponder: BP-1689502036-172.17.0.2-1733076910377:blk_1073741839_1015, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41477] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:23,867 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:35994 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35165:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35994 dst: /127.0.0.1:35165 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:23,865 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1897946069_22 at /127.0.0.1:34072 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:41477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34072 dst: /127.0.0.1:41477 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49396 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:23,866 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:33244 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33244 dst: /127.0.0.1:41477 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49683 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:23,868 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1676445854_22 at /127.0.0.1:35968 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35165:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35968 dst: /127.0.0.1:35165 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:23,868 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1897946069_22 at /127.0.0.1:37930 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:35165:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37930 dst: /127.0.0.1:35165 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:23,872 WARN [DataStreamer for file /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495/b8365d49b74c%2C41935%2C1733076911495.1733076911684 block BP-1689502036-172.17.0.2-1733076910377:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:23,885 WARN [DataStreamer for file /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta block BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:15:23,885 WARN [DataStreamer for file /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 block BP-1689502036-172.17.0.2-1733076910377:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:23,888 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e1ad401{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T18:15:23,888 WARN [DataStreamer for file /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,41243,1733076913026/b8365d49b74c%2C41243%2C1733076913026.1733076913252 block BP-1689502036-172.17.0.2-1733076910377:blk_1073741839_1015 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1015
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:23,888 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5c5873de{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T18:15:23,888 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T18:15:23,889 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ad3a453{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T18:15:23,889 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@415f9d92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,STOPPED}
2024-12-01T18:15:23,890 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T18:15:23,890 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1689502036-172.17.0.2-1733076910377 (Datanode Uuid 3b98803e-ebf7-4974-b1ec-c762ea0c3bbb) service to localhost/127.0.0.1:41701
2024-12-01T18:15:23,891 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T18:15:23,891 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T18:15:23,894 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data1/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:15:23,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data2/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:15:23,895 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T18:15:23,905 WARN [RS:0;b8365d49b74c:33203.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=4, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:23,906 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C33203%2C1733076911564:(num 1733076912007) roll requested
2024-12-01T18:15:23,906 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C33203%2C1733076911564.1733076923906
2024-12-01T18:15:23,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33203 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
	at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
	at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:23,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33203 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57192 deadline: 1733076933905, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL
2024-12-01T18:15:23,909 WARN [Thread-703 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1020
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:23,910 WARN [Thread-703 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK], DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]) is bad.
2024-12-01T18:15:23,910 WARN [Thread-703 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741840_1020
2024-12-01T18:15:23,913 WARN [Thread-703 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]
2024-12-01T18:15:23,920 WARN [regionserver/b8365d49b74c:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL
2024-12-01T18:15:23,920 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 with entries=4, filesize=959 B; new WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076923906
2024-12-01T18:15:23,921 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38325:38325),(127.0.0.1/127.0.0.1:46607:46607)]
2024-12-01T18:15:23,921 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 is not closed yet, will try archiving it next time
2024-12-01T18:15:23,921 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:23,921 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:23,922 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-01T18:15:23,922 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-01T18:15:23,922 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007
2024-12-01T18:15:23,925 WARN [IPC Server handler 4 on default port 41701 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741833_1009
2024-12-01T18:15:23,927 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 after 5ms
2024-12-01T18:15:24,107 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-01T18:15:24,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-01T18:15:24,146 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-01T18:15:24,146 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-01T18:15:27,928 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 after 4006ms
2024-12-01T18:15:35,953 INFO [Time-limited test {}] wal.TestLogRolling(243): log.getCurrentFileName(): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076923906
2024-12-01T18:15:35,954 WARN [ResponseProcessor for block BP-1689502036-172.17.0.2-1733076910377:blk_1073741841_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1689502036-172.17.0.2-1733076910377:blk_1073741841_1021
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:35,954 WARN [DataStreamer for file /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076923906 block BP-1689502036-172.17.0.2-1733076910377:blk_1073741841_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741841_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK], DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK]) is bad.
2024-12-01T18:15:35,955 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:49988 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741841_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36645:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49988 dst: /127.0.0.1:36645
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:15:35,955 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:54314 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741841_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36043:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54314 dst: /127.0.0.1:36043
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:15:35,972 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7614f371{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T18:15:35,972 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@50808759{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T18:15:35,972 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T18:15:35,972 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fc1710a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T18:15:35,972 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@518ab61c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,STOPPED}
2024-12-01T18:15:35,974 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T18:15:35,974 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T18:15:35,974 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T18:15:35,974 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1689502036-172.17.0.2-1733076910377 (Datanode Uuid 84157bd8-8204-424e-9882-d58ab490bedd) service to localhost/127.0.0.1:41701
2024-12-01T18:15:35,975 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data9/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:15:35,975 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data10/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:15:35,976 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T18:15:35,978 WARN [sync.1 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]]
2024-12-01T18:15:35,978 WARN [sync.1 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]]
2024-12-01T18:15:35,979 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C33203%2C1733076911564:(num 1733076923906) roll requested
2024-12-01T18:15:35,979 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C33203%2C1733076911564.1733076935979
2024-12-01T18:15:35,982 WARN [Thread-713 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:35,982 WARN [Thread-713 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK], DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK]) is bad.
2024-12-01T18:15:35,982 WARN [Thread-713 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741842_1024
2024-12-01T18:15:35,983 WARN [Thread-713 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK]
2024-12-01T18:15:35,984 WARN [Thread-713 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1025
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:35,984 WARN [Thread-713 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK], DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]) is bad.
2024-12-01T18:15:35,984 WARN [Thread-713 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741843_1025
2024-12-01T18:15:35,985 WARN [Thread-713 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]
2024-12-01T18:15:35,994 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076923906 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076935979
2024-12-01T18:15:35,994 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46607:46607),(127.0.0.1/127.0.0.1:41395:41395)]
2024-12-01T18:15:35,994 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 is not closed yet, will try archiving it next time
2024-12-01T18:15:35,995 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076923906 is not closed yet, will try archiving it next time
2024-12-01T18:15:35,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36043 is added to blk_1073741841_1023 (size=2431)
2024-12-01T18:15:36,397 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 is not closed yet, will try archiving it next time
2024-12-01T18:15:38,449 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6627e9c1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36043, datanodeUuid=bd9f93a6-d146-492a-a588-b4d49473edc0, infoPort=46607, infoSecurePort=0, ipcPort=45007, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377):Failed to transfer BP-1689502036-172.17.0.2-1733076910377:blk_1073741841_1023 to 127.0.0.1:41477 got
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:15:39,982 WARN [ResponseProcessor for block BP-1689502036-172.17.0.2-1733076910377:blk_1073741844_1026 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1689502036-172.17.0.2-1733076910377:blk_1073741844_1026
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:39,983 WARN [DataStreamer for file /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076935979 block BP-1689502036-172.17.0.2-1733076910377:blk_1073741844_1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741844_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK], DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]) is bad.
2024-12-01T18:15:39,983 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:38722 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:36043:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38722 dst: /127.0.0.1:36043
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:15:39,983 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34134 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:44007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34134 dst: /127.0.0.1:44007
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:15:39,985 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@19594abf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T18:15:39,986 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78d96bda{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T18:15:39,986 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T18:15:39,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a8a53d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T18:15:39,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@637c6f6b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,STOPPED}
2024-12-01T18:15:39,988 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T18:15:39,988 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T18:15:39,988 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T18:15:39,988 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1689502036-172.17.0.2-1733076910377 (Datanode Uuid bd9f93a6-d146-492a-a588-b4d49473edc0) service to localhost/127.0.0.1:41701
2024-12-01T18:15:39,989 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data5/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:15:39,989 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data6/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:15:39,989 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T18:15:39,992 WARN [sync.4 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK]]
2024-12-01T18:15:39,992 WARN [sync.4 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK]]
2024-12-01T18:15:39,992 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C33203%2C1733076911564:(num 1733076935979) roll requested
2024-12-01T18:15:39,992 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C33203%2C1733076911564.1733076939992
2024-12-01T18:15:39,995 WARN [Thread-723 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:39,995 WARN [Thread-723 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK], DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK]) is bad.
2024-12-01T18:15:39,995 WARN [Thread-723 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741845_1028
2024-12-01T18:15:39,996 WARN [Thread-723 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK]
2024-12-01T18:15:39,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33203 {}] regionserver.HRegion(8581): Flush requested on 40874ed7bf99d8ab52ff9f7b7db251a2
2024-12-01T18:15:39,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 40874ed7bf99d8ab52ff9f7b7db251a2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-01T18:15:39,999 WARN [Thread-723 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41477
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:39,999 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34152 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data8]'}, localName='127.0.0.1:44007', datanodeUuid='dcb8a0c1-6df1-4932-894d-369da74a0cbb', xmitsInProgress=0}:Exception transferring block BP-1689502036-172.17.0.2-1733076910377:blk_1073741846_1029 to mirror 127.0.0.1:41477
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:15:40,000 WARN [Thread-723 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK], DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]) is bad.
2024-12-01T18:15:40,000 WARN [Thread-723 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741846_1029
2024-12-01T18:15:40,000 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34152 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-01T18:15:40,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34152 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:44007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34152 dst: /127.0.0.1:44007
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:15:40,000 WARN [Thread-723 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]
2024-12-01T18:15:40,003 WARN [Thread-723 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35165
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:40,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34164 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data8]'}, localName='127.0.0.1:44007', datanodeUuid='dcb8a0c1-6df1-4932-894d-369da74a0cbb', xmitsInProgress=0}:Exception transferring block BP-1689502036-172.17.0.2-1733076910377:blk_1073741847_1030 to mirror 127.0.0.1:35165
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:15:40,003 WARN [Thread-723 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK], DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]) is bad.
2024-12-01T18:15:40,004 WARN [Thread-723 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741847_1030
2024-12-01T18:15:40,004 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34164 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-01T18:15:40,004 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34164 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:44007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34164 dst: /127.0.0.1:44007
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:15:40,005 WARN [Thread-723 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]
2024-12-01T18:15:40,008 WARN [Thread-723 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36043
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-01T18:15:40,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34172 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data8]'}, localName='127.0.0.1:44007', datanodeUuid='dcb8a0c1-6df1-4932-894d-369da74a0cbb', xmitsInProgress=0}:Exception transferring block BP-1689502036-172.17.0.2-1733076910377:blk_1073741848_1031 to mirror 127.0.0.1:36043
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:15:40,008 WARN [Thread-723 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK], DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]) is bad.
2024-12-01T18:15:40,008 WARN [Thread-723 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741848_1031
2024-12-01T18:15:40,008 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34172 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-01T18:15:40,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34172 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:44007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34172 dst: /127.0.0.1:44007
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:15:40,008 WARN [Thread-723 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK] 2024-12-01T18:15:40,009 WARN [IPC Server handler 2 on default port 41701 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-01T18:15:40,009 WARN [IPC Server handler 2 on default port 41701 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-01T18:15:40,009 WARN [IPC Server handler 2 on default port 41701 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-01T18:15:40,013 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076935979 with entries=13, filesize=14.10 KB; new WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076939992 2024-12-01T18:15:40,013 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41395:41395)] 2024-12-01T18:15:40,013 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 is not closed yet, will try archiving it next time 2024-12-01T18:15:40,013 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076935979 is not closed yet, will try archiving it next time 2024-12-01T18:15:40,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741844_1027 (size=14443) 2024-12-01T18:15:40,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/.tmp/info/b606b03183b2405bb4eaf17155e438ea is 1080, key is row0002/info:/1733076935977/Put/seqid=0 2024-12-01T18:15:40,021 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34190 [Receiving block 
BP-1689502036-172.17.0.2-1733076910377:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data8]'}, localName='127.0.0.1:44007', datanodeUuid='dcb8a0c1-6df1-4932-894d-369da74a0cbb', xmitsInProgress=0}:Exception transferring block BP-1689502036-172.17.0.2-1733076910377:blk_1073741850_1033 to mirror 127.0.0.1:35165 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:40,021 WARN [Thread-725 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35165 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:40,021 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34190 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-01T18:15:40,021 WARN [Thread-725 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK], DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]) is bad. 
2024-12-01T18:15:40,022 WARN [Thread-725 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741850_1033 2024-12-01T18:15:40,022 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34190 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:44007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34190 dst: /127.0.0.1:44007 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:40,022 WARN [Thread-725 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK] 2024-12-01T18:15:40,024 WARN [Thread-725 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36645 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:40,024 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34204 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data8]'}, localName='127.0.0.1:44007', datanodeUuid='dcb8a0c1-6df1-4932-894d-369da74a0cbb', xmitsInProgress=0}:Exception transferring block BP-1689502036-172.17.0.2-1733076910377:blk_1073741851_1034 to mirror 127.0.0.1:36645 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:40,025 WARN [Thread-725 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK], DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK]) is bad. 2024-12-01T18:15:40,025 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34204 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-01T18:15:40,025 WARN [Thread-725 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741851_1034 2024-12-01T18:15:40,025 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34204 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:44007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34204 dst: /127.0.0.1:44007 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-01T18:15:40,025 WARN [Thread-725 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK] 2024-12-01T18:15:40,027 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34218 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data8]'}, localName='127.0.0.1:44007', datanodeUuid='dcb8a0c1-6df1-4932-894d-369da74a0cbb', xmitsInProgress=0}:Exception transferring block BP-1689502036-172.17.0.2-1733076910377:blk_1073741852_1035 to mirror 127.0.0.1:36043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:40,027 WARN [Thread-725 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36043 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:40,028 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34218 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-12-01T18:15:40,028 WARN [Thread-725 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK], DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]) is bad. 2024-12-01T18:15:40,028 WARN [Thread-725 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741852_1035 2024-12-01T18:15:40,028 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34218 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:44007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34218 dst: /127.0.0.1:44007 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:40,028 WARN [Thread-725 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK] 2024-12-01T18:15:40,031 WARN [Thread-725 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41477 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:40,031 WARN [Thread-725 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK], DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]) is bad. 
2024-12-01T18:15:40,031 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34222 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data8]'}, localName='127.0.0.1:44007', datanodeUuid='dcb8a0c1-6df1-4932-894d-369da74a0cbb', xmitsInProgress=0}:Exception transferring block BP-1689502036-172.17.0.2-1733076910377:blk_1073741853_1036 to mirror 127.0.0.1:41477 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:40,031 WARN [Thread-725 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741853_1036 2024-12-01T18:15:40,031 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34222 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-01T18:15:40,031 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34222 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:44007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34222 dst: /127.0.0.1:44007 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:40,031 WARN [Thread-725 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK] 2024-12-01T18:15:40,032 WARN [IPC Server handler 3 on default port 41701 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-01T18:15:40,032 WARN [IPC Server handler 3 on default port 41701 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-01T18:15:40,032 WARN [IPC Server handler 3 on default port 41701 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-01T18:15:40,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741854_1037 (size=10347) 2024-12-01T18:15:40,212 WARN [sync.2 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK]] 2024-12-01T18:15:40,212 WARN [sync.2 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK]] 2024-12-01T18:15:40,212 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C33203%2C1733076911564:(num 1733076939992) roll requested 2024-12-01T18:15:40,213 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C33203%2C1733076911564.1733076940212 2024-12-01T18:15:40,216 WARN [Thread-738 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:40,216 WARN [Thread-738 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK], DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]) is bad. 2024-12-01T18:15:40,216 WARN [Thread-738 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741855_1038 2024-12-01T18:15:40,216 WARN [Thread-738 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK] 2024-12-01T18:15:40,218 WARN [Thread-738 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:40,218 WARN [Thread-738 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK], DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]) is bad. 2024-12-01T18:15:40,218 WARN [Thread-738 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741856_1039 2024-12-01T18:15:40,218 WARN [Thread-738 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41477,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK] 2024-12-01T18:15:40,220 WARN [Thread-738 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36645 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:40,220 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34246 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data8]'}, localName='127.0.0.1:44007', datanodeUuid='dcb8a0c1-6df1-4932-894d-369da74a0cbb', xmitsInProgress=0}:Exception transferring block BP-1689502036-172.17.0.2-1733076910377:blk_1073741857_1040 to mirror 127.0.0.1:36645 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:40,221 WARN [Thread-738 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK], DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK]) is bad. 2024-12-01T18:15:40,221 WARN [Thread-738 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741857_1040 2024-12-01T18:15:40,221 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34246 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-01T18:15:40,221 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34246 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:44007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34246 dst: /127.0.0.1:44007 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:40,221 WARN [Thread-738 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36645,DS-cb3c7192-9dc0-4870-8b23-020f575c98ea,DISK] 2024-12-01T18:15:40,223 WARN [Thread-738 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35165 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:40,223 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34250 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data8]'}, localName='127.0.0.1:44007', datanodeUuid='dcb8a0c1-6df1-4932-894d-369da74a0cbb', xmitsInProgress=0}:Exception transferring block BP-1689502036-172.17.0.2-1733076910377:blk_1073741858_1041 to mirror 127.0.0.1:35165 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:40,224 WARN [Thread-738 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44007,DS-68c6acc7-3a22-43a1-8e44-c276a1750b02,DISK], DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]) is bad. 2024-12-01T18:15:40,224 WARN [Thread-738 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741858_1041 2024-12-01T18:15:40,224 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34250 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-01T18:15:40,224 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1598555587_22 at /127.0.0.1:34250 [Receiving block BP-1689502036-172.17.0.2-1733076910377:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:44007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34250 dst: /127.0.0.1:44007 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-01T18:15:40,224 WARN [Thread-738 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK] 2024-12-01T18:15:40,225 WARN [IPC Server handler 4 on default port 41701 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-01T18:15:40,225 WARN [IPC Server handler 4 on default port 41701 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-01T18:15:40,225 WARN [IPC Server handler 4 on default port 41701 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-01T18:15:40,229 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076939992 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076940212 2024-12-01T18:15:40,229 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41395:41395)] 2024-12-01T18:15:40,229 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 is not closed yet, will try archiving it next time 2024-12-01T18:15:40,229 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076935979 is not closed yet, will try archiving it next time 2024-12-01T18:15:40,229 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076939992 is not closed yet, will try archiving it next time 2024-12-01T18:15:40,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741849_1032 (size=1261) 2024-12-01T18:15:40,414 WARN [sync.4 {}] wal.FSHLog(760): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-12-01T18:15:40,416 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 is not closed yet, will try archiving it next time 2024-12-01T18:15:40,416 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076939992 is not closed yet, will try archiving it next time 2024-12-01T18:15:40,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/.tmp/info/b606b03183b2405bb4eaf17155e438ea 2024-12-01T18:15:40,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/.tmp/info/b606b03183b2405bb4eaf17155e438ea as hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/info/b606b03183b2405bb4eaf17155e438ea 2024-12-01T18:15:40,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/info/b606b03183b2405bb4eaf17155e438ea, entries=5, sequenceid=12, filesize=10.1 K 2024-12-01T18:15:40,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 40874ed7bf99d8ab52ff9f7b7db251a2 in 459ms, sequenceid=12, compaction requested=false 2024-12-01T18:15:40,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 40874ed7bf99d8ab52ff9f7b7db251a2: 2024-12-01T18:15:40,630 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:15:40,636 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076912007 is not closed yet, will try archiving it next time 2024-12-01T18:15:40,640 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076923906 to hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/oldWALs/b8365d49b74c%2C33203%2C1733076911564.1733076923906 2024-12-01T18:15:40,642 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:15:40,642 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:15:40,642 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:15:40,643 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T18:15:40,643 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18fb5ac3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:15:40,644 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69f08f63{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:15:40,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@857d37c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/java.io.tmpdir/jetty-localhost-36795-hadoop-hdfs-3_4_1-tests_jar-_-any-15501919405493451761/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:15:40,780 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@560500bb{HTTP/1.1, (http/1.1)}{localhost:36795} 2024-12-01T18:15:40,781 INFO [Time-limited test {}] server.Server(415): Started @148074ms 2024-12-01T18:15:40,782 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:15:40,887 WARN [Thread-759 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:15:40,895 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x76afc8c10b5c5c8a with lease ID 0x239788ce6d120954: from storage DS-25c71848-679d-454c-a829-41c1af7b9da9 node DatanodeRegistration(127.0.0.1:43001, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42187, infoSecurePort=0, ipcPort=40793, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:15:40,896 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x76afc8c10b5c5c8a with lease ID 0x239788ce6d120954: from storage DS-c0ce54a4-cbac-4853-98c7-789f9f542539 node DatanodeRegistration(127.0.0.1:43001, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42187, infoSecurePort=0, ipcPort=40793, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:15:41,467 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T18:15:41,607 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3fe1cc1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44007, datanodeUuid=dcb8a0c1-6df1-4932-894d-369da74a0cbb, infoPort=41395, infoSecurePort=0, ipcPort=45513, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377):Failed to transfer BP-1689502036-172.17.0.2-1733076910377:blk_1073741854_1037 to 127.0.0.1:36645 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:41,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741844_1027 (size=14443) 2024-12-01T18:15:41,770 WARN [master/b8365d49b74c:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=96, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:41,771 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C41935%2C1733076911495:(num 1733076911684) roll requested 2024-12-01T18:15:41,771 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:41,771 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41935%2C1733076911495.1733076941771 2024-12-01T18:15:41,771 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:41,775 WARN [Thread-784 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:15:41,775 WARN [Thread-784 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK], DatanodeInfoWithStorage[127.0.0.1:43001,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]) is bad. 2024-12-01T18:15:41,775 WARN [Thread-784 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741860_1043 2024-12-01T18:15:41,775 WARN [Thread-784 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK] 2024-12-01T18:15:41,781 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL 2024-12-01T18:15:41,781 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495/b8365d49b74c%2C41935%2C1733076911495.1733076911684 with entries=93, filesize=46.05 KB; new WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495/b8365d49b74c%2C41935%2C1733076911495.1733076941771 2024-12-01T18:15:41,782 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41395:41395),(127.0.0.1/127.0.0.1:42187:42187)] 2024-12-01T18:15:41,782 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495/b8365d49b74c%2C41935%2C1733076911495.1733076911684 is not closed yet, will try archiving it next time 2024-12-01T18:15:41,782 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:41,782 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:41,782 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495/b8365d49b74c%2C41935%2C1733076911495.1733076911684 2024-12-01T18:15:41,782 WARN [IPC Server handler 3 on default port 41701 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495/b8365d49b74c%2C41935%2C1733076911495.1733076911684 has not been closed. Lease recovery is in progress. RecoveryId = 1045 for block blk_1073741830_1006 2024-12-01T18:15:41,783 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495/b8365d49b74c%2C41935%2C1733076911495.1733076911684 after 1ms 2024-12-01T18:15:42,606 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@d39f316[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44007, datanodeUuid=dcb8a0c1-6df1-4932-894d-369da74a0cbb, infoPort=41395, infoSecurePort=0, ipcPort=45513, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377):Failed to transfer BP-1689502036-172.17.0.2-1733076910377:blk_1073741849_1032 to 127.0.0.1:36645 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-01T18:15:43,821 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:15:43,822 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57806, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:15:44,073 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:15:44,075 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:15:45,784 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495/b8365d49b74c%2C41935%2C1733076911495.1733076911684 after 4002ms 2024-12-01T18:15:50,911 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@17c3ac6e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1689502036-172.17.0.2-1733076910377:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:35165,null,null]) java.net.ConnectException: Call From b8365d49b74c/172.17.0.2 to localhost:43445 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-01T18:15:50,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741833_1022 (size=959) 2024-12-01T18:15:52,894 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@261e9f12[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43001, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42187, infoSecurePort=0, ipcPort=40793, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377):Failed to transfer BP-1689502036-172.17.0.2-1733076910377:blk_1073741837_1013 to 127.0.0.1:36645 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:52,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:15:53,430 INFO [master/b8365d49b74c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-01T18:15:53,430 INFO [master/b8365d49b74c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-01T18:15:53,894 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@261e9f12[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43001, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42187, infoSecurePort=0, ipcPort=40793, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377):Failed to transfer BP-1689502036-172.17.0.2-1733076910377:blk_1073741829_1005 to 127.0.0.1:36645 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:53,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:15:55,895 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7dc6302b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43001, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42187, infoSecurePort=0, ipcPort=40793, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377):Failed to transfer BP-1689502036-172.17.0.2-1733076910377:blk_1073741832_1008 to 127.0.0.1:36043 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:55,895 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@261e9f12[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43001, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42187, infoSecurePort=0, ipcPort=40793, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377):Failed to transfer BP-1689502036-172.17.0.2-1733076910377:blk_1073741828_1004 to 127.0.0.1:36043 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:56,895 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7dc6302b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43001, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42187, infoSecurePort=0, ipcPort=40793, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377):Failed to transfer BP-1689502036-172.17.0.2-1733076910377:blk_1073741833_1022 to 127.0.0.1:36043 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:56,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:15:58,522 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 40874ed7bf99d8ab52ff9f7b7db251a2, had cached 0 bytes from a total of 10347 2024-12-01T18:15:58,895 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7dc6302b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43001, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42187, infoSecurePort=0, ipcPort=40793, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377):Failed to transfer BP-1689502036-172.17.0.2-1733076910377:blk_1073741825_1001 to 127.0.0.1:36043 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:58,895 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@261e9f12[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43001, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42187, infoSecurePort=0, ipcPort=40793, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377):Failed to transfer BP-1689502036-172.17.0.2-1733076910377:blk_1073741827_1003 to 127.0.0.1:36043 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:15:59,455 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C33203%2C1733076911564.1733076959455 2024-12-01T18:15:59,459 WARN [Thread-798 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:59,459 WARN [Thread-798 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741862_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK], DatanodeInfoWithStorage[127.0.0.1:43001,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]) is bad. 
2024-12-01T18:15:59,459 WARN [Thread-798 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741862_1046 2024-12-01T18:15:59,460 WARN [Thread-798 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK] 2024-12-01T18:15:59,466 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076940212 with entries=2, filesize=1.57 KB; new WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076959455 2024-12-01T18:15:59,466 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41395:41395),(127.0.0.1/127.0.0.1:42187:42187)] 2024-12-01T18:15:59,466 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076940212 is not closed yet, will try archiving it next time 2024-12-01T18:15:59,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741859_1042 (size=1618) 2024-12-01T18:15:59,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33203 {}] regionserver.HRegion(8581): Flush requested on 40874ed7bf99d8ab52ff9f7b7db251a2 2024-12-01T18:15:59,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 40874ed7bf99d8ab52ff9f7b7db251a2 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-01T18:15:59,470 INFO [sync.3 {}] wal.FSHLog(777): LowReplication-Roller was enabled. 
2024-12-01T18:15:59,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/.tmp/info/02fc3f2a72f1479dbb8e5ff0418b73f7 is 1080, key is row0007/info:/1733076939998/Put/seqid=0 2024-12-01T18:15:59,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741864_1048 (size=13583) 2024-12-01T18:15:59,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-01T18:15:59,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741864_1048 (size=13583) 2024-12-01T18:15:59,483 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-01T18:15:59,483 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3ab557ac to 127.0.0.1:64640 2024-12-01T18:15:59,483 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:15:59,483 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T18:15:59,483 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=241821032, stopped=false 2024-12-01T18:15:59,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=25 (bloomFilter=true), to=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/.tmp/info/02fc3f2a72f1479dbb8e5ff0418b73f7 2024-12-01T18:15:59,483 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b8365d49b74c,41935,1733076911495 2024-12-01T18:15:59,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41243-0x1004ecabe550003, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:15:59,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:15:59,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41243-0x1004ecabe550003, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:59,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:59,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:15:59,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:15:59,486 INFO [Time-limited test {}] 
procedure2.ProcedureExecutor(700): Stopping 2024-12-01T18:15:59,486 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:15:59,486 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,33203,1733076911564' ***** 2024-12-01T18:15:59,486 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-01T18:15:59,486 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,41243,1733076913026' ***** 2024-12-01T18:15:59,486 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-01T18:15:59,487 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:15:59,487 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:15:59,487 INFO [RS:1;b8365d49b74c:41243 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T18:15:59,487 INFO [RS:1;b8365d49b74c:41243 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T18:15:59,487 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,41243,1733076913026 2024-12-01T18:15:59,487 DEBUG [RS:1;b8365d49b74c:41243 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:15:59,487 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-01T18:15:59,487 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,41243,1733076913026; all regions closed. 2024-12-01T18:15:59,488 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:15:59,488 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41243-0x1004ecabe550003, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:15:59,488 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,41243,1733076913026 2024-12-01T18:15:59,488 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:15:59,488 WARN [WAL-Shutdown-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:15:59,488 ERROR [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(1664): Shutdown / close of WAL failed: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... 2024-12-01T18:15:59,489 DEBUG [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(1665): Shutdown / close exception details: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:59,489 DEBUG [RS:1;b8365d49b74c:41243 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:15:59,489 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:15:59,489 INFO [RS:1;b8365d49b74c:41243 {}] hbase.ChoreService(370): Chore service for: regionserver/b8365d49b74c:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-01T18:15:59,489 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:15:59,489 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:15:59,489 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-01T18:15:59,489 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-01T18:15:59,489 INFO [RS:1;b8365d49b74c:41243 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41243 2024-12-01T18:15:59,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41243-0x1004ecabe550003, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b8365d49b74c,41243,1733076913026 2024-12-01T18:15:59,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:15:59,492 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b8365d49b74c,41243,1733076913026] 2024-12-01T18:15:59,493 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b8365d49b74c,41243,1733076913026; numProcessing=1 2024-12-01T18:15:59,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/.tmp/info/02fc3f2a72f1479dbb8e5ff0418b73f7 as hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/info/02fc3f2a72f1479dbb8e5ff0418b73f7 2024-12-01T18:15:59,494 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b8365d49b74c,41243,1733076913026 already deleted, retry=false 2024-12-01T18:15:59,494 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b8365d49b74c,41243,1733076913026 expired; onlineServers=1 2024-12-01T18:15:59,500 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/info/02fc3f2a72f1479dbb8e5ff0418b73f7, entries=8, sequenceid=25, filesize=13.3 K 2024-12-01T18:15:59,501 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~10.50 KB/10757, heapSize ~11.48 KB/11760, currentSize=9.46 KB/9684 for 40874ed7bf99d8ab52ff9f7b7db251a2 in 33ms, sequenceid=25, compaction requested=false 2024-12-01T18:15:59,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 40874ed7bf99d8ab52ff9f7b7db251a2: 2024-12-01T18:15:59,502 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=23.4 K, sizeToCheck=16.0 K 2024-12-01T18:15:59,502 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:15:59,502 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/info/02fc3f2a72f1479dbb8e5ff0418b73f7 because midkey is the same as first or last row 2024-12-01T18:15:59,502 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-01T18:15:59,502 INFO [RS:0;b8365d49b74c:33203 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-01T18:15:59,502 INFO [RS:0;b8365d49b74c:33203 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T18:15:59,502 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(3579): Received CLOSE for 40874ed7bf99d8ab52ff9f7b7db251a2 2024-12-01T18:15:59,502 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(3579): Received CLOSE for d90a57a1002207e8dc1819546803b835 2024-12-01T18:15:59,502 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,33203,1733076911564 2024-12-01T18:15:59,502 DEBUG [RS:0;b8365d49b74c:33203 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:15:59,502 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:15:59,502 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:15:59,502 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T18:15:59,502 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-01T18:15:59,502 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 40874ed7bf99d8ab52ff9f7b7db251a2, disabling compactions & flushes 2024-12-01T18:15:59,503 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 2024-12-01T18:15:59,503 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-01T18:15:59,503 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 2024-12-01T18:15:59,503 DEBUG [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 40874ed7bf99d8ab52ff9f7b7db251a2=TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2., d90a57a1002207e8dc1819546803b835=hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835.} 2024-12-01T18:15:59,503 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. after waiting 0 ms 2024-12-01T18:15:59,503 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:15:59,503 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 
2024-12-01T18:15:59,503 DEBUG [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 40874ed7bf99d8ab52ff9f7b7db251a2, d90a57a1002207e8dc1819546803b835 2024-12-01T18:15:59,503 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:15:59,503 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:15:59,503 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:15:59,503 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:15:59,503 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 40874ed7bf99d8ab52ff9f7b7db251a2 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-01T18:15:59,503 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.87 KB heapSize=5.40 KB 2024-12-01T18:15:59,503 WARN [RS_OPEN_META-regionserver/b8365d49b74c:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:59,504 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C33203%2C1733076911564.meta:.meta(num 1733076912389) roll requested 2024-12-01T18:15:59,504 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:15:59,504 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C33203%2C1733076911564.meta.1733076959504.meta 2024-12-01T18:15:59,504 ERROR [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2808): ***** ABORTING region server b8365d49b74c,33203,1733076911564: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:59,504 ERROR [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-01T18:15:59,507 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-01T18:15:59,508 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-01T18:15:59,508 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-01T18:15:59,508 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-01T18:15:59,508 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 257344064 }, "NonHeapMemoryUsage": { "committed": 162070528, "init": 7667712, "max": -1, "used": 160264552 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-12-01T18:15:59,509 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/.tmp/info/8a6e3a1b740a4413a9b9b81e784dfc1e is 1080, key is row0014/info:/1733076959469/Put/seqid=0 2024-12-01T18:15:59,511 WARN [Thread-813 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:59,511 WARN [Thread-813 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1689502036-172.17.0.2-1733076910377:blk_1073741866_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK], DatanodeInfoWithStorage[127.0.0.1:43001,DS-25c71848-679d-454c-a829-41c1af7b9da9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK]) is bad. 2024-12-01T18:15:59,511 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41935 {}] master.MasterRpcServices(626): b8365d49b74c,33203,1733076911564 reported a fatal error: ***** ABORTING region server b8365d49b74c,33203,1733076911564: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** Cause: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) 2024-12-01T18:15:59,512 WARN [Thread-813 {}] hdfs.DataStreamer(1850): Abandoning BP-1689502036-172.17.0.2-1733076910377:blk_1073741866_1050 2024-12-01T18:15:59,512 WARN [Thread-813 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36043,DS-5456da90-636c-413b-a28c-04cde8ab04db,DISK] 2024-12-01T18:15:59,518 WARN [regionserver/b8365d49b74c:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-01T18:15:59,518 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta with entries=11, filesize=3.63 KB; new WAL /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076959504.meta 2024-12-01T18:15:59,518 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41395:41395),(127.0.0.1/127.0.0.1:42187:42187)] 2024-12-01T18:15:59,518 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta is not closed yet, will try archiving it next time 2024-12-01T18:15:59,518 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:59,518 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35165,DS-9d0eec45-d214-45f6-8cdb-09297d74690d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:15:59,518 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta 2024-12-01T18:15:59,519 WARN [IPC Server handler 2 on default port 41701 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta has not been closed. Lease recovery is in progress. RecoveryId = 1052 for block blk_1073741834_1010 2024-12-01T18:15:59,519 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta after 1ms 2024-12-01T18:15:59,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741867_1051 (size=14663) 2024-12-01T18:15:59,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741867_1051 (size=14663) 2024-12-01T18:15:59,521 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/.tmp/info/8a6e3a1b740a4413a9b9b81e784dfc1e 2024-12-01T18:15:59,527 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/.tmp/info/8a6e3a1b740a4413a9b9b81e784dfc1e as hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/info/8a6e3a1b740a4413a9b9b81e784dfc1e 2024-12-01T18:15:59,533 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/info/8a6e3a1b740a4413a9b9b81e784dfc1e, entries=9, sequenceid=37, filesize=14.3 K 2024-12-01T18:15:59,535 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 40874ed7bf99d8ab52ff9f7b7db251a2 in 31ms, sequenceid=37, compaction requested=true 2024-12-01T18:15:59,539 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/data/default/TestLogRolling-testLogRollOnDatanodeDeath/40874ed7bf99d8ab52ff9f7b7db251a2/recovered.edits/40.seqid, newMaxSeqId=40, maxSeqId=1 2024-12-01T18:15:59,540 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 2024-12-01T18:15:59,540 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 40874ed7bf99d8ab52ff9f7b7db251a2: 2024-12-01T18:15:59,540 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733076913150.40874ed7bf99d8ab52ff9f7b7db251a2. 2024-12-01T18:15:59,540 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing d90a57a1002207e8dc1819546803b835, disabling compactions & flushes 2024-12-01T18:15:59,540 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:59,540 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:59,540 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. after waiting 0 ms 2024-12-01T18:15:59,540 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:59,540 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for d90a57a1002207e8dc1819546803b835: 2024-12-01T18:15:59,540 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:59,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41243-0x1004ecabe550003, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:15:59,593 INFO [RS:1;b8365d49b74c:41243 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,41243,1733076913026; zookeeper connection closed. 
2024-12-01T18:15:59,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41243-0x1004ecabe550003, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:15:59,593 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6e86301d {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6e86301d 2024-12-01T18:15:59,703 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-01T18:15:59,703 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(3579): Received CLOSE for d90a57a1002207e8dc1819546803b835 2024-12-01T18:15:59,703 DEBUG [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, d90a57a1002207e8dc1819546803b835 2024-12-01T18:15:59,703 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:15:59,703 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing d90a57a1002207e8dc1819546803b835, disabling compactions & flushes 2024-12-01T18:15:59,704 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:15:59,704 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:59,704 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:15:59,704 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:59,704 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:15:59,704 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. after waiting 0 ms 2024-12-01T18:15:59,704 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:15:59,704 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:59,704 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:15:59,704 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for d90a57a1002207e8dc1819546803b835: 2024-12-01T18:15:59,704 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. 
Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733076912456.d90a57a1002207e8dc1819546803b835. 2024-12-01T18:15:59,704 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:meta,,1.1588230740 2024-12-01T18:15:59,854 INFO [regionserver/b8365d49b74c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:15:59,869 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076935979 to hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/oldWALs/b8365d49b74c%2C33203%2C1733076911564.1733076935979 2024-12-01T18:15:59,871 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076939992 to hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/oldWALs/b8365d49b74c%2C33203%2C1733076911564.1733076939992 2024-12-01T18:15:59,871 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.1733076940212 to hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/oldWALs/b8365d49b74c%2C33203%2C1733076911564.1733076940212 2024-12-01T18:15:59,904 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing 2024-12-01T18:15:59,904 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,33203,1733076911564; all regions closed. 2024-12-01T18:15:59,904 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564 2024-12-01T18:15:59,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741865_1049 (size=93) 2024-12-01T18:15:59,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741865_1049 (size=93) 2024-12-01T18:15:59,932 INFO [regionserver/b8365d49b74c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-01T18:15:59,932 INFO [regionserver/b8365d49b74c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-01T18:16:00,895 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7dc6302b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43001, datanodeUuid=bd09ba3b-250a-46d3-98a5-dc69244ef5d7, infoPort=42187, infoSecurePort=0, ipcPort=40793, storageInfo=lv=-57;cid=testClusterID;nsid=127020978;c=1733076910377):Failed to transfer BP-1689502036-172.17.0.2-1733076910377:blk_1073741836_1012 to 127.0.0.1:36043 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:00,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741838_1014 (size=76) 2024-12-01T18:16:00,916 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5f15df7 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1689502036-172.17.0.2-1733076910377:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:35165,null,null]) java.net.ConnectException: Call From b8365d49b74c/172.17.0.2 to localhost:43445 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-01T18:16:01,115 INFO [regionserver/b8365d49b74c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:16:01,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741830_1045 (size=47160) 2024-12-01T18:16:02,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741859_1042 (size=1618) 2024-12-01T18:16:03,520 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta after 4002ms 2024-12-01T18:16:04,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:04,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:04,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:04,559 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:04,559 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:04,907 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-01T18:16:04,908 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564 2024-12-01T18:16:04,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741863_1047 (size=13514) 2024-12-01T18:16:04,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741863_1047 (size=13514) 2024-12-01T18:16:04,911 DEBUG [RS:0;b8365d49b74c:33203 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:16:04,911 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:16:04,911 INFO [RS:0;b8365d49b74c:33203 {}] hbase.ChoreService(370): Chore service for: regionserver/b8365d49b74c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-01T18:16:04,911 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-01T18:16:04,912 INFO [RS:0;b8365d49b74c:33203 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33203 2024-12-01T18:16:04,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:16:04,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b8365d49b74c,33203,1733076911564 2024-12-01T18:16:04,916 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b8365d49b74c,33203,1733076911564] 2024-12-01T18:16:04,916 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b8365d49b74c,33203,1733076911564; numProcessing=2 2024-12-01T18:16:04,917 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b8365d49b74c,33203,1733076911564 already deleted, retry=false 2024-12-01T18:16:04,917 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b8365d49b74c,33203,1733076911564 expired; onlineServers=0 2024-12-01T18:16:04,917 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,41935,1733076911495' ***** 2024-12-01T18:16:04,917 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T18:16:04,918 DEBUG [M:0;b8365d49b74c:41935 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@194dcd18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:16:04,918 INFO [M:0;b8365d49b74c:41935 
{}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,41935,1733076911495 2024-12-01T18:16:04,918 INFO [M:0;b8365d49b74c:41935 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,41935,1733076911495; all regions closed. 2024-12-01T18:16:04,918 DEBUG [M:0;b8365d49b74c:41935 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:16:04,918 DEBUG [M:0;b8365d49b74c:41935 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T18:16:04,918 DEBUG [M:0;b8365d49b74c:41935 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T18:16:04,918 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-01T18:16:04,918 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733076911772 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733076911772,5,FailOnTimeoutGroup] 2024-12-01T18:16:04,918 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733076911773 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733076911773,5,FailOnTimeoutGroup] 2024-12-01T18:16:04,918 INFO [M:0;b8365d49b74c:41935 {}] hbase.ChoreService(370): Chore service for: master/b8365d49b74c:0 had [] on shutdown 2024-12-01T18:16:04,918 DEBUG [M:0;b8365d49b74c:41935 {}] master.HMaster(1733): Stopping service threads 2024-12-01T18:16:04,918 INFO [M:0;b8365d49b74c:41935 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T18:16:04,919 INFO [M:0;b8365d49b74c:41935 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T18:16:04,919 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-01T18:16:04,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T18:16:04,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:04,920 DEBUG [M:0;b8365d49b74c:41935 {}] zookeeper.ZKUtil(347): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T18:16:04,920 WARN [M:0;b8365d49b74c:41935 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T18:16:04,920 INFO [M:0;b8365d49b74c:41935 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-01T18:16:04,920 INFO [M:0;b8365d49b74c:41935 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T18:16:04,920 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:16:04,920 DEBUG [M:0;b8365d49b74c:41935 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:16:04,920 INFO [M:0;b8365d49b74c:41935 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:04,920 DEBUG [M:0;b8365d49b74c:41935 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:04,920 DEBUG [M:0;b8365d49b74c:41935 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:16:04,920 DEBUG [M:0;b8365d49b74c:41935 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T18:16:04,920 INFO [M:0;b8365d49b74c:41935 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.09 KB heapSize=49.30 KB 2024-12-01T18:16:04,937 DEBUG [M:0;b8365d49b74c:41935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/44b8d5a8b4594a57b7969dd6de7da82f is 82, key is hbase:meta,,1/info:regioninfo/1733076912428/Put/seqid=0 2024-12-01T18:16:04,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741868_1053 (size=5672) 2024-12-01T18:16:04,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741868_1053 (size=5672) 2024-12-01T18:16:04,943 INFO [M:0;b8365d49b74c:41935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/44b8d5a8b4594a57b7969dd6de7da82f 2024-12-01T18:16:04,964 DEBUG [M:0;b8365d49b74c:41935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/77241bc3e42c49539897e9e0e8a07900 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733076913553/Put/seqid=0 2024-12-01T18:16:04,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741869_1054 (size=7466) 2024-12-01T18:16:04,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741869_1054 (size=7466) 2024-12-01T18:16:04,970 INFO [M:0;b8365d49b74c:41935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.42 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/77241bc3e42c49539897e9e0e8a07900 2024-12-01T18:16:04,990 DEBUG [M:0;b8365d49b74c:41935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4ce055b1b5f7436cb9c02ea210f430b9 is 69, key is b8365d49b74c,33203,1733076911564/rs:state/1733076911825/Put/seqid=0 2024-12-01T18:16:04,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741870_1055 (size=5224) 2024-12-01T18:16:04,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741870_1055 (size=5224) 2024-12-01T18:16:04,995 INFO [M:0;b8365d49b74c:41935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4ce055b1b5f7436cb9c02ea210f430b9 2024-12-01T18:16:05,015 DEBUG [M:0;b8365d49b74c:41935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell 
in hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6c9af337e30d4120a19adc7d3013f66f is 52, key is load_balancer_on/state:d/1733076913008/Put/seqid=0 2024-12-01T18:16:05,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:16:05,016 INFO [RS:0;b8365d49b74c:33203 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,33203,1733076911564; zookeeper connection closed. 2024-12-01T18:16:05,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33203-0x1004ecabe550001, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:16:05,016 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@10497271 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@10497271 2024-12-01T18:16:05,016 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-01T18:16:05,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741871_1056 (size=5056) 2024-12-01T18:16:05,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741871_1056 (size=5056) 2024-12-01T18:16:05,021 INFO [M:0;b8365d49b74c:41935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6c9af337e30d4120a19adc7d3013f66f 2024-12-01T18:16:05,028 DEBUG [M:0;b8365d49b74c:41935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/44b8d5a8b4594a57b7969dd6de7da82f as hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/44b8d5a8b4594a57b7969dd6de7da82f 2024-12-01T18:16:05,034 INFO [M:0;b8365d49b74c:41935 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/44b8d5a8b4594a57b7969dd6de7da82f, entries=8, sequenceid=97, filesize=5.5 K 2024-12-01T18:16:05,035 DEBUG [M:0;b8365d49b74c:41935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/77241bc3e42c49539897e9e0e8a07900 as hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/77241bc3e42c49539897e9e0e8a07900 2024-12-01T18:16:05,041 INFO [M:0;b8365d49b74c:41935 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/77241bc3e42c49539897e9e0e8a07900, entries=11, 
sequenceid=97, filesize=7.3 K 2024-12-01T18:16:05,042 DEBUG [M:0;b8365d49b74c:41935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4ce055b1b5f7436cb9c02ea210f430b9 as hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4ce055b1b5f7436cb9c02ea210f430b9 2024-12-01T18:16:05,048 INFO [M:0;b8365d49b74c:41935 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4ce055b1b5f7436cb9c02ea210f430b9, entries=2, sequenceid=97, filesize=5.1 K 2024-12-01T18:16:05,049 DEBUG [M:0;b8365d49b74c:41935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6c9af337e30d4120a19adc7d3013f66f as hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6c9af337e30d4120a19adc7d3013f66f 2024-12-01T18:16:05,055 INFO [M:0;b8365d49b74c:41935 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6c9af337e30d4120a19adc7d3013f66f, entries=1, sequenceid=97, filesize=4.9 K 2024-12-01T18:16:05,056 INFO [M:0;b8365d49b74c:41935 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.09 KB/41051, heapSize ~49.23 KB/50416, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 136ms, sequenceid=97, compaction requested=false 2024-12-01T18:16:05,058 INFO [M:0;b8365d49b74c:41935 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:05,058 DEBUG [M:0;b8365d49b74c:41935 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:16:05,058 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/MasterData/WALs/b8365d49b74c,41935,1733076911495 2024-12-01T18:16:05,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43001 is added to blk_1073741861_1044 (size=757) 2024-12-01T18:16:05,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44007 is added to blk_1073741861_1044 (size=757) 2024-12-01T18:16:05,061 INFO [M:0;b8365d49b74c:41935 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-01T18:16:05,061 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-01T18:16:05,061 INFO [M:0;b8365d49b74c:41935 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41935 2024-12-01T18:16:05,063 DEBUG [M:0;b8365d49b74c:41935 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b8365d49b74c,41935,1733076911495 already deleted, retry=false 2024-12-01T18:16:05,066 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T18:16:05,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:05,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:05,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:05,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:05,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:05,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:16:05,165 INFO [M:0;b8365d49b74c:41935 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,41935,1733076911495; zookeeper connection closed. 2024-12-01T18:16:05,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1004ecabe550000, quorum=127.0.0.1:64640, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:16:05,167 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@857d37c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:05,168 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@560500bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:16:05,168 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:16:05,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69f08f63{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:16:05,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18fb5ac3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,STOPPED} 2024-12-01T18:16:05,169 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:16:05,170 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:16:05,170 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:16:05,169 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@77ec5321 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:35165,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:43445 , LocalHost:localPort b8365d49b74c/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-01T18:16:05,170 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1689502036-172.17.0.2-1733076910377 (Datanode Uuid bd09ba3b-250a-46d3-98a5-dc69244ef5d7) service to localhost/127.0.0.1:41701 2024-12-01T18:16:05,170 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data3/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:05,171 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data4/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:05,171 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@77ec5321 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(293): Failed to updateBlock (newblock=BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1052, datanode=DatanodeInfoWithStorage[127.0.0.1:43001,null,null]) org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: Replica not found for BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1010[numBytes=3714,originalReplicaState=RWR]. The block may have been removed recently by the balancer or by intentionally reducing the replication factor. This condition is usually harmless. To be certain, please check the preceding datanode log messages for signs of a more serious issue. 
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.updateReplicaUnderRecovery(FsDatasetImpl.java:3104) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode.updateReplicaUnderRecovery(DataNode.java:3537) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$BlockRecord.updateReplicaUnderRecovery(BlockRecoveryWorker.java:88) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$BlockRecord.access$700(BlockRecoveryWorker.java:71) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.syncBlock(BlockRecoveryWorker.java:289) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:183) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:05,171 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:16:05,171 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@77ec5321 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:43001,null,null], DatanodeInfoWithStorage[127.0.0.1:35165,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: Cannot recover BP-1689502036-172.17.0.2-1733076910377:blk_1073741834_1010, the following datanodes failed: [DatanodeInfoWithStorage[127.0.0.1:43001,null,null]] 2024-12-01T18:16:05,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18097a5f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:05,173 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@52e765a8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:16:05,173 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:16:05,174 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b6caab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:16:05,174 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47b90ec5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,STOPPED} 2024-12-01T18:16:05,175 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:16:05,175 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:16:05,175 WARN [BP-1689502036-172.17.0.2-1733076910377 heartbeating to localhost/127.0.0.1:41701 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1689502036-172.17.0.2-1733076910377 (Datanode Uuid dcb8a0c1-6df1-4932-894d-369da74a0cbb) service to localhost/127.0.0.1:41701 2024-12-01T18:16:05,175 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:16:05,175 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data7/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:05,175 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/cluster_4dbd280b-3b13-204a-c8e7-714d36079548/dfs/data/data8/current/BP-1689502036-172.17.0.2-1733076910377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:05,176 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:16:05,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@722cfddf{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:16:05,182 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@411ed8c5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:16:05,182 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:16:05,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25d9b335{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:16:05,183 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49bb953b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir/,STOPPED} 2024-12-01T18:16:05,190 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-01T18:16:05,218 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-01T18:16:05,226 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=86 (was 62) Potentially hanging thread: nioEventLoopGroup-12-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:41701 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (1454462474) connection to localhost/127.0.0.1:41701 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-12-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41701 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-13-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1454462474) connection to localhost/127.0.0.1:41701 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-12-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41701 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-13-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1454462474) connection to localhost/127.0.0.1:41701 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:41701 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$797/0x00007f5764b87000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41701 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41701 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-13-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=430 (was 403) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=171 (was 169) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3434 (was 2943) - AvailableMemoryMB LEAK? 
- 2024-12-01T18:16:05,232 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=86, OpenFileDescriptor=430, MaxFileDescriptor=1048576, SystemLoadAverage=171, ProcessCount=11, AvailableMemoryMB=3434 2024-12-01T18:16:05,232 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T18:16:05,232 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.log.dir so I do NOT create it in target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075 2024-12-01T18:16:05,232 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/618a53fe-0237-aba8-2017-c601437a2fec/hadoop.tmp.dir so I do NOT create it in target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075 2024-12-01T18:16:05,233 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1, deleteOnExit=true 2024-12-01T18:16:05,233 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-01T18:16:05,233 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/test.cache.data in system properties and HBase conf 2024-12-01T18:16:05,233 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T18:16:05,233 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir in system properties and HBase conf 2024-12-01T18:16:05,233 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T18:16:05,233 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T18:16:05,233 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-01T18:16:05,233 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-01T18:16:05,233 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/nfs.dump.dir in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/java.io.tmpdir in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T18:16:05,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T18:16:05,247 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:16:05,313 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:05,318 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:16:05,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:16:05,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:16:05,319 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T18:16:05,320 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:05,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72060fdc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:16:05,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@611c74c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:16:05,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24cb120{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/java.io.tmpdir/jetty-localhost-38231-hadoop-hdfs-3_4_1-tests_jar-_-any-16320541143075280076/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:16:05,436 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5c1f50f5{HTTP/1.1, (http/1.1)}{localhost:38231} 2024-12-01T18:16:05,436 INFO [Time-limited test {}] server.Server(415): Started @172730ms 2024-12-01T18:16:05,449 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:16:05,513 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:05,516 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:16:05,516 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:16:05,516 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:16:05,516 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:16:05,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b0b4f60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:16:05,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@457fbe4e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:16:05,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:16:05,632 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@48848c47{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/java.io.tmpdir/jetty-localhost-46057-hadoop-hdfs-3_4_1-tests_jar-_-any-7395802242452094261/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:05,633 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@c987a9{HTTP/1.1, (http/1.1)}{localhost:46057} 2024-12-01T18:16:05,633 INFO [Time-limited test {}] server.Server(415): Started @172926ms 2024-12-01T18:16:05,634 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:16:05,668 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:05,674 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:16:05,675 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:16:05,675 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:16:05,675 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T18:16:05,675 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@471c148c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:16:05,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79424f0d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:16:05,713 WARN [Thread-931 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data1/current/BP-572904914-172.17.0.2-1733076965265/current, will proceed with Du for space computation calculation, 2024-12-01T18:16:05,713 WARN [Thread-932 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data2/current/BP-572904914-172.17.0.2-1733076965265/current, will proceed with Du for space computation calculation, 2024-12-01T18:16:05,736 WARN [Thread-910 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:16:05,739 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbb115a389750e800 with lease ID 0xb2c22b15c708e83b: Processing first storage report for DS-93693323-6788-4ac9-a6b2-0b88f3f20111 from datanode DatanodeRegistration(127.0.0.1:41499, datanodeUuid=20fd19ec-4c9e-4592-982f-e43420840404, infoPort=33711, infoSecurePort=0, ipcPort=36643, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265) 2024-12-01T18:16:05,739 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb115a389750e800 with lease ID 0xb2c22b15c708e83b: from storage DS-93693323-6788-4ac9-a6b2-0b88f3f20111 node DatanodeRegistration(127.0.0.1:41499, datanodeUuid=20fd19ec-4c9e-4592-982f-e43420840404, infoPort=33711, infoSecurePort=0, ipcPort=36643, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:05,739 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbb115a389750e800 with lease ID 0xb2c22b15c708e83b: Processing first storage report for DS-1d805745-55e5-42bd-b5cc-8f9549db2ff8 from datanode DatanodeRegistration(127.0.0.1:41499, datanodeUuid=20fd19ec-4c9e-4592-982f-e43420840404, infoPort=33711, infoSecurePort=0, ipcPort=36643, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265) 2024-12-01T18:16:05,739 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb115a389750e800 with lease ID 0xb2c22b15c708e83b: from storage DS-1d805745-55e5-42bd-b5cc-8f9549db2ff8 node DatanodeRegistration(127.0.0.1:41499, datanodeUuid=20fd19ec-4c9e-4592-982f-e43420840404, infoPort=33711, infoSecurePort=0, ipcPort=36643, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:05,808 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2fde4d38{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/java.io.tmpdir/jetty-localhost-46797-hadoop-hdfs-3_4_1-tests_jar-_-any-14077095779119163704/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:05,808 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@46eea223{HTTP/1.1, (http/1.1)}{localhost:46797} 2024-12-01T18:16:05,808 INFO [Time-limited test {}] server.Server(415): Started @173102ms 2024-12-01T18:16:05,810 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-01T18:16:05,891 WARN [Thread-957 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data3/current/BP-572904914-172.17.0.2-1733076965265/current, will proceed with Du for space computation calculation, 2024-12-01T18:16:05,891 WARN [Thread-958 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data4/current/BP-572904914-172.17.0.2-1733076965265/current, will proceed with Du for space computation calculation, 2024-12-01T18:16:05,913 WARN [Thread-946 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:16:05,915 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7c324610271b1d00 with lease ID 0xb2c22b15c708e83c: Processing first storage report for DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d from datanode DatanodeRegistration(127.0.0.1:36777, datanodeUuid=c52e09f9-85ef-4b8b-ac10-26eb94a71d50, infoPort=39879, infoSecurePort=0, ipcPort=40273, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265) 2024-12-01T18:16:05,915 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c324610271b1d00 with lease ID 0xb2c22b15c708e83c: from storage DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d node DatanodeRegistration(127.0.0.1:36777, datanodeUuid=c52e09f9-85ef-4b8b-ac10-26eb94a71d50, infoPort=39879, infoSecurePort=0, ipcPort=40273, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:05,915 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7c324610271b1d00 with lease ID 0xb2c22b15c708e83c: Processing first storage report for DS-3190f951-5c20-4004-81fa-2588c06a618b from datanode DatanodeRegistration(127.0.0.1:36777, datanodeUuid=c52e09f9-85ef-4b8b-ac10-26eb94a71d50, infoPort=39879, infoSecurePort=0, ipcPort=40273, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265) 2024-12-01T18:16:05,915 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c324610271b1d00 with lease ID 0xb2c22b15c708e83c: from storage DS-3190f951-5c20-4004-81fa-2588c06a618b node DatanodeRegistration(127.0.0.1:36777, datanodeUuid=c52e09f9-85ef-4b8b-ac10-26eb94a71d50, infoPort=39879, infoSecurePort=0, ipcPort=40273, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:05,938 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075 2024-12-01T18:16:05,944 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/zookeeper_0, clientPort=56873, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T18:16:05,945 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=56873 2024-12-01T18:16:05,946 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:05,947 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:05,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36777 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:16:05,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41499 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:16:05,959 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355 with version=8 2024-12-01T18:16:05,959 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/hbase-staging 2024-12-01T18:16:05,961 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:16:05,961 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:05,961 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:05,961 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:16:05,961 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:05,961 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:16:05,961 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:16:05,962 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:16:05,962 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46671 2024-12-01T18:16:05,963 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:05,964 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:05,966 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:46671 connecting to ZooKeeper ensemble=127.0.0.1:56873 2024-12-01T18:16:05,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:466710x0, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:16:05,972 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46671-0x1004ecb931b0000 connected 2024-12-01T18:16:05,987 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:16:05,987 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:16:05,988 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:16:05,992 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46671 2024-12-01T18:16:05,992 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46671 2024-12-01T18:16:05,994 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46671 2024-12-01T18:16:05,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46671 2024-12-01T18:16:05,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46671 2024-12-01T18:16:05,995 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355, hbase.cluster.distributed=false 2024-12-01T18:16:06,030 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:16:06,030 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:06,030 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:06,030 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:16:06,030 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:06,030 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:16:06,030 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:16:06,030 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:16:06,031 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41671 2024-12-01T18:16:06,031 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:16:06,032 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:16:06,032 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:06,034 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:06,037 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41671 connecting to ZooKeeper ensemble=127.0.0.1:56873 2024-12-01T18:16:06,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:416710x0, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:16:06,039 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:416710x0, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:16:06,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41671-0x1004ecb931b0001 connected 2024-12-01T18:16:06,040 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:16:06,040 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:16:06,041 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41671 2024-12-01T18:16:06,041 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41671 2024-12-01T18:16:06,041 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41671 2024-12-01T18:16:06,041 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41671 2024-12-01T18:16:06,041 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41671 2024-12-01T18:16:06,042 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b8365d49b74c,46671,1733076965961 2024-12-01T18:16:06,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:16:06,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:16:06,045 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b8365d49b74c,46671,1733076965961 2024-12-01T18:16:06,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:16:06,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:16:06,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,047 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:16:06,048 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b8365d49b74c,46671,1733076965961 from backup master directory 2024-12-01T18:16:06,049 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T18:16:06,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:16:06,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b8365d49b74c,46671,1733076965961 2024-12-01T18:16:06,049 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:16:06,049 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b8365d49b74c,46671,1733076965961 2024-12-01T18:16:06,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:16:06,054 DEBUG [M:0;b8365d49b74c:46671 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b8365d49b74c:46671 2024-12-01T18:16:06,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41499 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:16:06,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36777 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:16:06,069 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/hbase.id with ID: ae039bd9-0580-4b91-83ec-fa93e296539d 2024-12-01T18:16:06,080 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:06,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36777 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:16:06,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41499 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:16:06,090 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:16:06,091 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T18:16:06,091 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:16:06,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36777 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:16:06,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41499 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:16:06,099 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store 2024-12-01T18:16:06,105 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36777 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:16:06,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41499 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:16:06,106 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:06,106 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:16:06,106 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:06,106 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:06,106 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:16:06,106 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:06,106 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:06,106 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:16:06,107 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/.initializing 2024-12-01T18:16:06,107 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961 2024-12-01T18:16:06,110 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C46671%2C1733076965961, suffix=, logDir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961, archiveDir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/oldWALs, maxLogs=10 2024-12-01T18:16:06,110 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C46671%2C1733076965961.1733076966110 2024-12-01T18:16:06,114 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961/b8365d49b74c%2C46671%2C1733076965961.1733076966110 2024-12-01T18:16:06,115 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33711:33711),(127.0.0.1/127.0.0.1:39879:39879)] 2024-12-01T18:16:06,115 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] 
regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:16:06,115 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:06,115 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:06,115 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:06,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:06,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T18:16:06,118 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:06,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:06,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:06,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T18:16:06,120 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:06,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:16:06,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:06,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T18:16:06,121 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:06,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:16:06,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:06,123 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 
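For reference, the column-family settings printed in the 'master:store' descriptor above (VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE) correspond to what the public HBase builder API produces. A minimal sketch follows; the standalone class and the 'example:store' table name are placeholders for illustration only, since the real master:store region is internal and never created through client code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Roughly the 'info' family from the descriptor logged above:
    // VERSIONS => '3', IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL',
    // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => 8 KB.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();

    // 'example:store' is a hypothetical table name used only for this sketch.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example", "store"))
        .setColumnFamily(info)
        // Same metadata key that appears in the logged TABLE_ATTRIBUTES.
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        .build();

    System.out.println(td);
  }
}

The other families in the log (proc, rs, state) differ only in VERSIONS, BLOOMFILTER and block size, so they would be built the same way with those setters changed.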
2024-12-01T18:16:06,123 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:06,123 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:16:06,124 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:06,124 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:06,126 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T18:16:06,127 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:06,129 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:16:06,130 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715293, jitterRate=-0.09045882523059845}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:16:06,130 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:16:06,131 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T18:16:06,133 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1aa0abb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:16:06,134 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
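The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=10" line above for the master's local-store WAL is driven by ordinary site configuration. A small sketch of the usual region-server-level knobs follows; the key names are quoted from memory and should be checked against hbase-default.xml, and the master's local store WAL applies its own values (hence maxLogs=10 here versus 32 for the region server WAL later in this log).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size (256 MB in the log above).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll size is the block size scaled by this multiplier (0.5 => 128 MB).
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Maximum number of region server WAL files before forced flushes.
    conf.setInt("hbase.regionserver.maxlogs", 32);
    System.out.println(conf.get("hbase.regionserver.maxlogs"));
  }
}

The rollsize printed in the log is exactly half the blocksize, which is consistent with a 0.5 multiplier rather than a separately configured roll size.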
2024-12-01T18:16:06,134 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T18:16:06,134 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T18:16:06,135 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T18:16:06,135 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-01T18:16:06,135 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-01T18:16:06,135 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T18:16:06,137 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-01T18:16:06,138 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T18:16:06,139 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-01T18:16:06,139 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T18:16:06,140 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T18:16:06,140 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-01T18:16:06,141 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T18:16:06,141 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T18:16:06,142 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-01T18:16:06,143 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T18:16:06,144 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T18:16:06,145 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T18:16:06,146 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T18:16:06,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:16:06,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:16:06,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,148 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b8365d49b74c,46671,1733076965961, sessionid=0x1004ecb931b0000, setting cluster-up flag (Was=false) 2024-12-01T18:16:06,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,154 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T18:16:06,155 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,46671,1733076965961 2024-12-01T18:16:06,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,161 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T18:16:06,162 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,46671,1733076965961 2024-12-01T18:16:06,164 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-12-01T18:16:06,164 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-01T18:16:06,164 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-01T18:16:06,165 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b8365d49b74c,46671,1733076965961 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T18:16:06,165 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:16:06,165 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:16:06,165 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:16:06,165 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:16:06,165 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b8365d49b74c:0, corePoolSize=10, maxPoolSize=10 2024-12-01T18:16:06,165 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,165 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:16:06,165 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,168 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733076996168 2024-12-01T18:16:06,168 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:16:06,168 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-01T18:16:06,168 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): 
log_cleaner Cleaner pool size is 1 2024-12-01T18:16:06,168 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T18:16:06,168 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T18:16:06,168 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T18:16:06,168 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T18:16:06,168 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T18:16:06,169 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,169 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T18:16:06,169 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:06,169 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T18:16:06,169 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T18:16:06,169 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:16:06,169 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T18:16:06,170 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T18:16:06,170 DEBUG 
[master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733076966170,5,FailOnTimeoutGroup] 2024-12-01T18:16:06,171 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733076966170,5,FailOnTimeoutGroup] 2024-12-01T18:16:06,171 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,171 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T18:16:06,171 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,171 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36777 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:16:06,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41499 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:16:06,181 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-01T18:16:06,181 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355 2024-12-01T18:16:06,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36777 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:16:06,191 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41499 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:16:06,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:06,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:16:06,194 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:16:06,194 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:06,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:06,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:16:06,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:16:06,196 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:06,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:06,197 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:16:06,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:16:06,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:06,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:06,199 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/meta/1588230740 2024-12-01T18:16:06,200 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/meta/1588230740 2024-12-01T18:16:06,201 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
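The CompactionConfiguration lines repeated above are the store-level compaction defaults echoed once per column family. For tuning, the corresponding configuration keys are sketched below; the key names are the commonly documented ones and are given here as an assumption to verify, not quoted from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}

The throttle point of 2684354560 bytes in the log appears to be 2 x maxFilesToCompact x the 134217728-byte flush size noted earlier, i.e. derived from these values rather than set directly.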
2024-12-01T18:16:06,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:16:06,205 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:16:06,205 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698803, jitterRate=-0.11142657697200775}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:16:06,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:16:06,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:16:06,206 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:16:06,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:16:06,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:16:06,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:16:06,206 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-01T18:16:06,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:16:06,207 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:16:06,207 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-01T18:16:06,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T18:16:06,209 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T18:16:06,210 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T18:16:06,254 DEBUG [RS:0;b8365d49b74c:41671 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b8365d49b74c:41671 2024-12-01T18:16:06,255 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1008): ClusterId : ae039bd9-0580-4b91-83ec-fa93e296539d 2024-12-01T18:16:06,255 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:16:06,257 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:16:06,257 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:16:06,259 DEBUG 
[RS:0;b8365d49b74c:41671 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:16:06,259 DEBUG [RS:0;b8365d49b74c:41671 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bfaf3ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:16:06,260 DEBUG [RS:0;b8365d49b74c:41671 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b6c7efb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:16:06,260 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-01T18:16:06,260 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-01T18:16:06,260 DEBUG [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-01T18:16:06,260 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(3073): reportForDuty to master=b8365d49b74c,46671,1733076965961 with isa=b8365d49b74c/172.17.0.2:41671, startcode=1733076966029 2024-12-01T18:16:06,260 DEBUG [RS:0;b8365d49b74c:41671 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:16:06,263 INFO [RS-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43961, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:16:06,263 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46671 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b8365d49b74c,41671,1733076966029 2024-12-01T18:16:06,263 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46671 {}] master.ServerManager(486): Registering regionserver=b8365d49b74c,41671,1733076966029 2024-12-01T18:16:06,265 DEBUG [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355 2024-12-01T18:16:06,265 DEBUG [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38703 2024-12-01T18:16:06,265 DEBUG [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-01T18:16:06,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:16:06,267 DEBUG [RS:0;b8365d49b74c:41671 {}] zookeeper.ZKUtil(111): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b8365d49b74c,41671,1733076966029 2024-12-01T18:16:06,267 WARN [RS:0;b8365d49b74c:41671 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T18:16:06,267 INFO [RS:0;b8365d49b74c:41671 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:16:06,267 DEBUG [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029 2024-12-01T18:16:06,267 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b8365d49b74c,41671,1733076966029] 2024-12-01T18:16:06,270 DEBUG [RS:0;b8365d49b74c:41671 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-01T18:16:06,271 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:16:06,272 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:16:06,272 INFO [RS:0;b8365d49b74c:41671 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:16:06,273 INFO [RS:0;b8365d49b74c:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,273 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-01T18:16:06,274 INFO [RS:0;b8365d49b74c:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
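The MemStoreFlusher line above (globalMemStoreLimit=880 M, low mark 836 M) and the compaction throughput bounds (100 MB/s upper, 50 MB/s lower) come from heap-fraction and throughput settings. A sketch of the usual keys follows; treat the exact key names as assumptions to check against the HBase reference guide.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreAndThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Global memstore upper mark as a fraction of the region server heap.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low mark as a fraction of the upper mark (0.95 => 836 M of 880 M).
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // PressureAwareCompactionThroughputController bounds (bytes/second).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
  }
}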
2024-12-01T18:16:06,274 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,274 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,274 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,274 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,274 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,274 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:16:06,274 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,274 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,274 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,275 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,275 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:06,275 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:16:06,275 DEBUG [RS:0;b8365d49b74c:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:16:06,275 INFO [RS:0;b8365d49b74c:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,276 INFO [RS:0;b8365d49b74c:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,276 INFO [RS:0;b8365d49b74c:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,276 INFO [RS:0;b8365d49b74c:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,276 INFO [RS:0;b8365d49b74c:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41671,1733076966029-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-01T18:16:06,294 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:16:06,295 INFO [RS:0;b8365d49b74c:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41671,1733076966029-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,309 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.Replication(204): b8365d49b74c,41671,1733076966029 started 2024-12-01T18:16:06,309 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1767): Serving as b8365d49b74c,41671,1733076966029, RpcServer on b8365d49b74c/172.17.0.2:41671, sessionid=0x1004ecb931b0001 2024-12-01T18:16:06,309 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:16:06,309 DEBUG [RS:0;b8365d49b74c:41671 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b8365d49b74c,41671,1733076966029 2024-12-01T18:16:06,309 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,41671,1733076966029' 2024-12-01T18:16:06,309 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:16:06,309 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:16:06,310 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:16:06,310 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:16:06,310 DEBUG [RS:0;b8365d49b74c:41671 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b8365d49b74c,41671,1733076966029 2024-12-01T18:16:06,310 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,41671,1733076966029' 2024-12-01T18:16:06,310 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:16:06,310 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:16:06,310 DEBUG [RS:0;b8365d49b74c:41671 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:16:06,311 INFO [RS:0;b8365d49b74c:41671 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:16:06,311 INFO [RS:0;b8365d49b74c:41671 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T18:16:06,360 WARN [b8365d49b74c:46671 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-01T18:16:06,413 INFO [RS:0;b8365d49b74c:41671 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C41671%2C1733076966029, suffix=, logDir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029, archiveDir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/oldWALs, maxLogs=32 2024-12-01T18:16:06,414 INFO [RS:0;b8365d49b74c:41671 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41671%2C1733076966029.1733076966414 2024-12-01T18:16:06,420 INFO [RS:0;b8365d49b74c:41671 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 2024-12-01T18:16:06,420 DEBUG [RS:0;b8365d49b74c:41671 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39879:39879),(127.0.0.1/127.0.0.1:33711:33711)] 2024-12-01T18:16:06,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:16:06,611 DEBUG [b8365d49b74c:46671 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-01T18:16:06,611 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b8365d49b74c,41671,1733076966029 2024-12-01T18:16:06,612 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,41671,1733076966029, state=OPENING 2024-12-01T18:16:06,614 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T18:16:06,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:06,616 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b8365d49b74c,41671,1733076966029}] 2024-12-01T18:16:06,616 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:16:06,616 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:16:06,769 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41671,1733076966029 2024-12-01T18:16:06,769 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T18:16:06,771 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53678, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T18:16:06,774 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-01T18:16:06,774 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:16:06,776 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C41671%2C1733076966029.meta, suffix=.meta, logDir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029, archiveDir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/oldWALs, maxLogs=32 2024-12-01T18:16:06,777 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41671%2C1733076966029.meta.1733076966777.meta 2024-12-01T18:16:06,783 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.meta.1733076966777.meta 2024-12-01T18:16:06,783 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39879:39879),(127.0.0.1/127.0.0.1:33711:33711)] 2024-12-01T18:16:06,783 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:16:06,784 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T18:16:06,784 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T18:16:06,784 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-01T18:16:06,784 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T18:16:06,784 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:06,784 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-01T18:16:06,784 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-01T18:16:06,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:16:06,786 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:16:06,787 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
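Note: the "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above reflect the standard WAL sizing settings. A minimal sketch of the configuration keys these numbers usually come from, assuming the test leaves the roll multiplier (0.5) and maxlogs at their defaults (class name is made up for the sketch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Mirrors the values in the "WAL configuration" log lines: 256 MB block size,
    // roll size = blocksize * hbase.regionserver.logroll.multiplier (0.5) = 128 MB.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    conf.setInt("hbase.regionserver.maxlogs", 32);
    long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("roll size = " + rollSize + " bytes"); // 134217728 (128 MB)
  }
}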
2024-12-01T18:16:06,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:06,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:16:06,788 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:16:06,788 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:06,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:06,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:16:06,789 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:16:06,789 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:06,790 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:06,791 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) 
under hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/meta/1588230740 2024-12-01T18:16:06,792 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/meta/1588230740 2024-12-01T18:16:06,793 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-01T18:16:06,795 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:16:06,795 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=869925, jitterRate=0.10616761445999146}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:16:06,796 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:16:06,796 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733076966768 2024-12-01T18:16:06,799 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T18:16:06,799 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-01T18:16:06,799 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,41671,1733076966029 2024-12-01T18:16:06,800 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,41671,1733076966029, state=OPEN 2024-12-01T18:16:06,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:16:06,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:16:06,803 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:16:06,803 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:16:06,805 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T18:16:06,805 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, 
state=SUCCESS; OpenRegionProcedure 1588230740, server=b8365d49b74c,41671,1733076966029 in 187 msec 2024-12-01T18:16:06,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T18:16:06,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 597 msec 2024-12-01T18:16:06,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 645 msec 2024-12-01T18:16:06,810 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733076966810, completionTime=-1 2024-12-01T18:16:06,810 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-01T18:16:06,810 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-01T18:16:06,811 DEBUG [hconnection-0x22383d0-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:16:06,812 INFO [RS-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53680, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:16:06,813 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-01T18:16:06,813 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733077026813 2024-12-01T18:16:06,813 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733077086813 2024-12-01T18:16:06,813 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-01T18:16:06,818 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,46671,1733076965961-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,819 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,46671,1733076965961-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,819 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,46671,1733076965961-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,819 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b8365d49b74c:46671, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,819 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:06,819 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. 
Creating... 2024-12-01T18:16:06,819 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:16:06,820 DEBUG [master/b8365d49b74c:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-01T18:16:06,820 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-01T18:16:06,821 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:16:06,822 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:06,822 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:16:06,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41499 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:16:06,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36777 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:16:06,832 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 55a8e2f83dea527a3f6dc58337aa0768, NAME => 'hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355 2024-12-01T18:16:06,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36777 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:16:06,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41499 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:16:06,838 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:06,838 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 55a8e2f83dea527a3f6dc58337aa0768, disabling compactions & flushes 2024-12-01T18:16:06,838 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 2024-12-01T18:16:06,838 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 2024-12-01T18:16:06,838 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. after waiting 0 ms 2024-12-01T18:16:06,838 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 2024-12-01T18:16:06,838 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 2024-12-01T18:16:06,838 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 55a8e2f83dea527a3f6dc58337aa0768: 2024-12-01T18:16:06,839 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:16:06,840 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733076966839"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733076966839"}]},"ts":"1733076966839"} 2024-12-01T18:16:06,842 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-01T18:16:06,842 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:16:06,843 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076966842"}]},"ts":"1733076966842"} 2024-12-01T18:16:06,844 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-01T18:16:06,847 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=55a8e2f83dea527a3f6dc58337aa0768, ASSIGN}] 2024-12-01T18:16:06,848 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=55a8e2f83dea527a3f6dc58337aa0768, ASSIGN 2024-12-01T18:16:06,849 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=55a8e2f83dea527a3f6dc58337aa0768, ASSIGN; state=OFFLINE, location=b8365d49b74c,41671,1733076966029; forceNewPlan=false, retain=false 2024-12-01T18:16:07,000 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=55a8e2f83dea527a3f6dc58337aa0768, regionState=OPENING, regionLocation=b8365d49b74c,41671,1733076966029 2024-12-01T18:16:07,002 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 55a8e2f83dea527a3f6dc58337aa0768, server=b8365d49b74c,41671,1733076966029}] 2024-12-01T18:16:07,155 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41671,1733076966029 2024-12-01T18:16:07,158 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 2024-12-01T18:16:07,159 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 55a8e2f83dea527a3f6dc58337aa0768, NAME => 'hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:16:07,159 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 55a8e2f83dea527a3f6dc58337aa0768 2024-12-01T18:16:07,159 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:07,159 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 55a8e2f83dea527a3f6dc58337aa0768 2024-12-01T18:16:07,159 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 55a8e2f83dea527a3f6dc58337aa0768 2024-12-01T18:16:07,160 INFO [StoreOpener-55a8e2f83dea527a3f6dc58337aa0768-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 55a8e2f83dea527a3f6dc58337aa0768 2024-12-01T18:16:07,162 INFO [StoreOpener-55a8e2f83dea527a3f6dc58337aa0768-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 55a8e2f83dea527a3f6dc58337aa0768 columnFamilyName info 2024-12-01T18:16:07,162 DEBUG [StoreOpener-55a8e2f83dea527a3f6dc58337aa0768-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:07,163 INFO [StoreOpener-55a8e2f83dea527a3f6dc58337aa0768-1 {}] regionserver.HStore(327): Store=55a8e2f83dea527a3f6dc58337aa0768/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:16:07,163 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/namespace/55a8e2f83dea527a3f6dc58337aa0768 2024-12-01T18:16:07,164 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/namespace/55a8e2f83dea527a3f6dc58337aa0768 2024-12-01T18:16:07,170 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 55a8e2f83dea527a3f6dc58337aa0768 2024-12-01T18:16:07,172 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/namespace/55a8e2f83dea527a3f6dc58337aa0768/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:16:07,172 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 55a8e2f83dea527a3f6dc58337aa0768; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=801680, jitterRate=0.019389137625694275}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:16:07,172 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 55a8e2f83dea527a3f6dc58337aa0768: 2024-12-01T18:16:07,173 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768., pid=6, masterSystemTime=1733076967154 2024-12-01T18:16:07,175 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 2024-12-01T18:16:07,175 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 
2024-12-01T18:16:07,176 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=55a8e2f83dea527a3f6dc58337aa0768, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,41671,1733076966029 2024-12-01T18:16:07,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T18:16:07,181 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 55a8e2f83dea527a3f6dc58337aa0768, server=b8365d49b74c,41671,1733076966029 in 175 msec 2024-12-01T18:16:07,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T18:16:07,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=55a8e2f83dea527a3f6dc58337aa0768, ASSIGN in 333 msec 2024-12-01T18:16:07,183 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:16:07,183 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076967183"}]},"ts":"1733076967183"} 2024-12-01T18:16:07,185 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-01T18:16:07,188 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:16:07,190 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 369 msec 2024-12-01T18:16:07,222 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-01T18:16:07,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:16:07,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:07,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:07,227 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-01T18:16:07,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:16:07,238 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 11 msec 2024-12-01T18:16:07,249 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-01T18:16:07,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:16:07,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 10 msec 2024-12-01T18:16:07,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-01T18:16:07,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-01T18:16:07,266 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.217sec 2024-12-01T18:16:07,266 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T18:16:07,266 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T18:16:07,266 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T18:16:07,266 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-01T18:16:07,266 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T18:16:07,266 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,46671,1733076965961-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:16:07,266 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,46671,1733076965961-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T18:16:07,268 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-01T18:16:07,268 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T18:16:07,268 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,46671,1733076965961-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T18:16:07,344 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65f88687 to 127.0.0.1:56873 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1cdd08b4
2024-12-01T18:16:07,348 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60fc940c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-01T18:16:07,349 DEBUG [hconnection-0x1410a80f-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-01T18:16:07,351 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53696, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-01T18:16:07,352 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b8365d49b74c,46671,1733076965961
2024-12-01T18:16:07,353 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T18:16:07,356 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-01T18:16:07,356 INFO [Time-limited test {}] wal.TestLogRolling(297): Starting testLogRollOnPipelineRestart
2024-12-01T18:16:07,356 INFO [Time-limited test {}] wal.TestLogRolling(300): Replication=2
2024-12-01T18:16:07,357 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-01T18:16:07,358 INFO [RS-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43362, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-01T18:16:07,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46671 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-01T18:16:07,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46671 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
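Note: the two TableDescriptorChecker warnings above indicate the table about to be created uses deliberately tiny limits (MAX_FILESIZE=786432, MEMSTORE_FLUSHSIZE=8192) so splits, flushes and log rolls happen quickly during the test. A minimal sketch of a descriptor carrying those values through the public client API (illustrative only; the test builds its descriptor through its own utilities, and the class name here is made up):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinyTableSketch {
  // Builds a descriptor matching the values the checker warns about above.
  static TableDescriptor descriptor() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432L)       // would trigger the MAX_FILESIZE warning
        .setMemStoreFlushSize(8192L)   // would trigger the MEMSTORE_FLUSHSIZE warning
        .build();
  }

  static void create(Admin admin) throws IOException {
    admin.createTable(descriptor());
  }
}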
2024-12-01T18:16:07,360 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46671 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:16:07,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46671 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-01T18:16:07,362 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:16:07,362 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:07,362 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46671 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 9 2024-12-01T18:16:07,363 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:16:07,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46671 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-01T18:16:07,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36777 is added to blk_1073741837_1013 (size=395) 2024-12-01T18:16:07,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41499 is added to blk_1073741837_1013 (size=395) 2024-12-01T18:16:07,374 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f0c7eee6eaa95dc6e646f1875aa0645d, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355 2024-12-01T18:16:07,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36777 is added to blk_1073741838_1014 (size=78) 2024-12-01T18:16:07,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41499 is added to blk_1073741838_1014 (size=78) 2024-12-01T18:16:07,381 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:07,381 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1681): Closing f0c7eee6eaa95dc6e646f1875aa0645d, disabling compactions & flushes 2024-12-01T18:16:07,381 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:07,381 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:07,381 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. after waiting 0 ms 2024-12-01T18:16:07,381 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:07,381 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:07,381 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1635): Region close journal for f0c7eee6eaa95dc6e646f1875aa0645d: 2024-12-01T18:16:07,383 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:16:07,383 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733076967383"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733076967383"}]},"ts":"1733076967383"} 2024-12-01T18:16:07,385 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
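Note: the Put above writes the new region's info:regioninfo and info:state cells into hbase:meta under the row key printed in the log. A minimal sketch of reading that row back with the client API (row key copied from the log; class and method names are hypothetical):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowLookupSketch {
  // Reads back the hbase:meta row written by CREATE_TABLE_ADD_TO_META above.
  static Result fetch(Connection conn) throws IOException {
    byte[] row = Bytes.toBytes(
        "TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d.");
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Get get = new Get(row);
      get.addColumn(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"));
      get.addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"));
      return meta.get(get);
    }
  }
}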
2024-12-01T18:16:07,385 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:16:07,386 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076967386"}]},"ts":"1733076967386"} 2024-12-01T18:16:07,387 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-01T18:16:07,390 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f0c7eee6eaa95dc6e646f1875aa0645d, ASSIGN}] 2024-12-01T18:16:07,391 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f0c7eee6eaa95dc6e646f1875aa0645d, ASSIGN 2024-12-01T18:16:07,392 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f0c7eee6eaa95dc6e646f1875aa0645d, ASSIGN; state=OFFLINE, location=b8365d49b74c,41671,1733076966029; forceNewPlan=false, retain=false 2024-12-01T18:16:07,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:07,543 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=f0c7eee6eaa95dc6e646f1875aa0645d, regionState=OPENING, regionLocation=b8365d49b74c,41671,1733076966029 2024-12-01T18:16:07,545 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure f0c7eee6eaa95dc6e646f1875aa0645d, server=b8365d49b74c,41671,1733076966029}] 2024-12-01T18:16:07,697 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41671,1733076966029 2024-12-01T18:16:07,702 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:07,702 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => f0c7eee6eaa95dc6e646f1875aa0645d, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:16:07,702 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart f0c7eee6eaa95dc6e646f1875aa0645d 2024-12-01T18:16:07,702 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:07,702 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for f0c7eee6eaa95dc6e646f1875aa0645d 2024-12-01T18:16:07,703 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for f0c7eee6eaa95dc6e646f1875aa0645d 2024-12-01T18:16:07,704 INFO [StoreOpener-f0c7eee6eaa95dc6e646f1875aa0645d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f0c7eee6eaa95dc6e646f1875aa0645d 2024-12-01T18:16:07,705 INFO [StoreOpener-f0c7eee6eaa95dc6e646f1875aa0645d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f0c7eee6eaa95dc6e646f1875aa0645d columnFamilyName info 2024-12-01T18:16:07,705 DEBUG [StoreOpener-f0c7eee6eaa95dc6e646f1875aa0645d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:07,706 INFO [StoreOpener-f0c7eee6eaa95dc6e646f1875aa0645d-1 {}] regionserver.HStore(327): Store=f0c7eee6eaa95dc6e646f1875aa0645d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:16:07,707 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/default/TestLogRolling-testLogRollOnPipelineRestart/f0c7eee6eaa95dc6e646f1875aa0645d 2024-12-01T18:16:07,707 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/default/TestLogRolling-testLogRollOnPipelineRestart/f0c7eee6eaa95dc6e646f1875aa0645d 2024-12-01T18:16:07,709 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for f0c7eee6eaa95dc6e646f1875aa0645d 2024-12-01T18:16:07,711 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/default/TestLogRolling-testLogRollOnPipelineRestart/f0c7eee6eaa95dc6e646f1875aa0645d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:16:07,712 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened f0c7eee6eaa95dc6e646f1875aa0645d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=739052, jitterRate=-0.060246825218200684}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:16:07,713 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for f0c7eee6eaa95dc6e646f1875aa0645d: 2024-12-01T18:16:07,714 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d., pid=11, masterSystemTime=1733076967697 2024-12-01T18:16:07,716 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:07,716 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 
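Note: at this point the TestLogRolling-testLogRollOnPipelineRestart region is open on b8365d49b74c,41671,1733076966029. A minimal sketch of writing one edit and requesting a WAL roll on that server through the public Admin API (illustrative only; not necessarily how the test itself triggers the roll, and the class name is made up):

import java.io.IOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RollWalSketch {
  static void writeAndRoll(Connection conn) throws IOException {
    TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
    try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
      // One edit into the 'info' family of the freshly opened region.
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put);
      // Ask the region server named in the log to roll its WAL.
      admin.rollWALWriter(ServerName.valueOf("b8365d49b74c,41671,1733076966029"));
    }
  }
}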
2024-12-01T18:16:07,716 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=f0c7eee6eaa95dc6e646f1875aa0645d, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,41671,1733076966029 2024-12-01T18:16:07,720 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-01T18:16:07,720 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure f0c7eee6eaa95dc6e646f1875aa0645d, server=b8365d49b74c,41671,1733076966029 in 173 msec 2024-12-01T18:16:07,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-01T18:16:07,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f0c7eee6eaa95dc6e646f1875aa0645d, ASSIGN in 330 msec 2024-12-01T18:16:07,723 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:16:07,724 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733076967723"}]},"ts":"1733076967723"} 2024-12-01T18:16:07,725 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-01T18:16:07,727 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:16:07,729 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 367 msec 2024-12-01T18:16:08,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:08,602 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-01T18:16:08,602 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-01T18:16:08,602 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-01T18:16:09,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:10,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:11,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:12,286 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T18:16:12,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:12,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:12,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:12,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:12,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:12,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:12,306 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:12,324 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-01T18:16:12,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:13,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:14,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:15,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:16,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:16:17,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46671 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-01T18:16:17,365 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart, procId: 9 completed 2024-12-01T18:16:17,368 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-01T18:16:17,368 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:17,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:18,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:19,374 INFO [Time-limited test {}] wal.TestLogRolling(337): log.getCurrentFileName()): hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 2024-12-01T18:16:19,375 WARN [ResponseProcessor for block BP-572904914-172.17.0.2-1733076965265:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-572904914-172.17.0.2-1733076965265:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:19,375 WARN [ResponseProcessor for block BP-572904914-172.17.0.2-1733076965265:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-572904914-172.17.0.2-1733076965265:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
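The test records log.getCurrentFileName() just before the write pipeline is disturbed; the same kind of WAL roll can be requested from a client. A minimal sketch, assuming the HBase 2.x Admin API, with the server name taken verbatim from the log above; the class name RollWal is hypothetical and this is not the test's own code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWal {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // server name in "host,port,startcode" form, as printed in the log
          ServerName rs = ServerName.valueOf("b8365d49b74c,41671,1733076966029");
          // asks the region server to close its current WAL writer and open a new file;
          // closing the old writer is the step that triggers lease recovery when the
          // underlying HDFS pipeline has already failed
          admin.rollWALWriter(rs);
        }
      }
    }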
2024-12-01T18:16:19,375 WARN [ResponseProcessor for block BP-572904914-172.17.0.2-1733076965265:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-572904914-172.17.0.2-1733076965265:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-572904914-172.17.0.2-1733076965265:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:36777,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:19,376 WARN [DataStreamer for file /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.meta.1733076966777.meta block BP-572904914-172.17.0.2-1733076965265:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-572904914-172.17.0.2-1733076965265:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36777,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK], DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36777,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK]) is bad. 2024-12-01T18:16:19,376 WARN [DataStreamer for file /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 block BP-572904914-172.17.0.2-1733076965265:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-572904914-172.17.0.2-1733076965265:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36777,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK], DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36777,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK]) is bad. 2024-12-01T18:16:19,376 WARN [DataStreamer for file /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961/b8365d49b74c%2C46671%2C1733076965961.1733076966110 block BP-572904914-172.17.0.2-1733076965265:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-572904914-172.17.0.2-1733076965265:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK], DatanodeInfoWithStorage[127.0.0.1:36777,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36777,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK]) is bad. 2024-12-01T18:16:19,376 WARN [PacketResponder: BP-572904914-172.17.0.2-1733076965265:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36777] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:19,376 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1000386064_22 at /127.0.0.1:46286 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46286 dst: /127.0.0.1:41499 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:19,376 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1000386064_22 at /127.0.0.1:55366 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55366 dst: /127.0.0.1:36777 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:19,377 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1000386064_22 at /127.0.0.1:55380 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55380 dst: /127.0.0.1:36777 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:19,377 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1819056780_22 at /127.0.0.1:46262 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46262 dst: /127.0.0.1:41499 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:19,377 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1819056780_22 at /127.0.0.1:55348 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55348 dst: /127.0.0.1:36777 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:19,377 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1000386064_22 at /127.0.0.1:46300 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46300 dst: /127.0.0.1:41499 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
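The "datanode ... is bad" recovery above is governed by the DFS client's replace-datanode-on-failure settings. A minimal sketch of the relevant client-side configuration, under the assumption that the test harness runs with values like these (the exact settings are not shown in this log); the class name and the demo path are hypothetical, while the namenode port 38703 is the one appearing in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PipelineRecoveryConfig {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // when a datanode in a write pipeline fails, try to replace it rather than
        // silently continuing with fewer replicas (DEFAULT applies a size/replication heuristic)
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // if no replacement datanode is available (e.g. a two-datanode mini cluster),
        // keep writing with the surviving pipeline instead of failing the stream
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        try (FileSystem fs = FileSystem.get(new java.net.URI("hdfs://localhost:38703"), conf)) {
          // open and close a stream just to exercise the configured client behaviour
          fs.create(new Path("/tmp/pipeline-recovery-demo")).close();
        }
      }
    }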
2024-12-01T18:16:19,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2fde4d38{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:19,379 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@46eea223{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:16:19,379 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:16:19,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79424f0d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:16:19,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@471c148c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,STOPPED} 2024-12-01T18:16:19,382 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:16:19,382 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-01T18:16:19,382 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-572904914-172.17.0.2-1733076965265 (Datanode Uuid c52e09f9-85ef-4b8b-ac10-26eb94a71d50) service to localhost/127.0.0.1:38703 2024-12-01T18:16:19,382 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:16:19,383 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data3/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:19,383 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data4/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:19,383 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:16:19,391 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:19,394 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:16:19,395 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:16:19,395 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:16:19,395 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T18:16:19,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31be664c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:16:19,396 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2070a2a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:16:19,510 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4fd89e84{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/java.io.tmpdir/jetty-localhost-36753-hadoop-hdfs-3_4_1-tests_jar-_-any-13179288473532527076/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:19,510 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@783c49af{HTTP/1.1, (http/1.1)}{localhost:36753} 2024-12-01T18:16:19,510 INFO [Time-limited test {}] server.Server(415): Started @186804ms 2024-12-01T18:16:19,512 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:16:19,529 WARN [ResponseProcessor for block BP-572904914-172.17.0.2-1733076965265:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-572904914-172.17.0.2-1733076965265:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:19,530 WARN [ResponseProcessor for block BP-572904914-172.17.0.2-1733076965265:blk_1073741834_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-572904914-172.17.0.2-1733076965265:blk_1073741834_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:19,530 WARN [ResponseProcessor for block BP-572904914-172.17.0.2-1733076965265:blk_1073741833_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-572904914-172.17.0.2-1733076965265:blk_1073741833_1017 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:19,530 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1000386064_22 at /127.0.0.1:39284 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39284 dst: /127.0.0.1:41499 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
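The Jetty datanode context being stopped and a new one started a moment later is the mini-cluster restarting one datanode underneath the open WAL streams. The test source is not part of this log, so the following is only an approximate sketch of that stop/restart pattern, assuming MiniDFSCluster from the hadoop-hdfs test artifact; the class name is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class RestartOneDataNode {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
          cluster.waitActive();
          // take the first datanode down; write pipelines through it start failing with
          // EOFException / ClosedChannelException, as seen in the log above
          var stopped = cluster.stopDataNode(0);
          // ... exercise the writer while the pipeline is degraded ...
          // bring the same datanode back and wait for the cluster to settle
          cluster.restartDataNode(stopped);
          cluster.waitActive();
        } finally {
          cluster.shutdown();
        }
      }
    }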
2024-12-01T18:16:19,530 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1000386064_22 at /127.0.0.1:39282 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39282 dst: /127.0.0.1:41499 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:19,530 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1819056780_22 at /127.0.0.1:39276 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39276 dst: /127.0.0.1:41499 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:19,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:19,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@48848c47{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:19,540 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@c987a9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:16:19,540 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:16:19,540 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@457fbe4e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:16:19,540 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b0b4f60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,STOPPED} 2024-12-01T18:16:19,542 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:16:19,542 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-572904914-172.17.0.2-1733076965265 (Datanode Uuid 20fd19ec-4c9e-4592-982f-e43420840404) service to localhost/127.0.0.1:38703 2024-12-01T18:16:19,542 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data1/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:19,542 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data2/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:19,543 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-01T18:16:19,543 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:16:19,543 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:16:19,563 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:19,570 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:16:19,573 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:16:19,573 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:16:19,573 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T18:16:19,576 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3deee880{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:16:19,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66f09d11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:16:19,612 WARN [Thread-1092 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:16:19,615 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b1ba63067065c0d with lease ID 0xb2c22b15c708e83d: from storage DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d node DatanodeRegistration(127.0.0.1:38891, datanodeUuid=c52e09f9-85ef-4b8b-ac10-26eb94a71d50, infoPort=43107, infoSecurePort=0, ipcPort=34947, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:19,616 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b1ba63067065c0d with lease ID 0xb2c22b15c708e83d: from storage DS-3190f951-5c20-4004-81fa-2588c06a618b node DatanodeRegistration(127.0.0.1:38891, datanodeUuid=c52e09f9-85ef-4b8b-ac10-26eb94a71d50, infoPort=43107, infoSecurePort=0, ipcPort=34947, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:19,694 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11f0c6f9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/java.io.tmpdir/jetty-localhost-45315-hadoop-hdfs-3_4_1-tests_jar-_-any-18377487464252112114/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:19,695 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e8e93a4{HTTP/1.1, (http/1.1)}{localhost:45315} 2024-12-01T18:16:19,695 INFO [Time-limited test {}] server.Server(415): Started @186989ms 2024-12-01T18:16:19,697 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-01T18:16:19,786 WARN [Thread-1123 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:16:19,789 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ff41078112a4f5f with lease ID 0xb2c22b15c708e83e: from storage DS-93693323-6788-4ac9-a6b2-0b88f3f20111 node DatanodeRegistration(127.0.0.1:33809, datanodeUuid=20fd19ec-4c9e-4592-982f-e43420840404, infoPort=44199, infoSecurePort=0, ipcPort=35927, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T18:16:19,789 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ff41078112a4f5f with lease ID 0xb2c22b15c708e83e: from storage DS-1d805745-55e5-42bd-b5cc-8f9549db2ff8 node DatanodeRegistration(127.0.0.1:33809, datanodeUuid=20fd19ec-4c9e-4592-982f-e43420840404, infoPort=44199, infoSecurePort=0, ipcPort=35927, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:20,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:16:20,715 INFO [Time-limited test {}] wal.TestLogRolling(349): Data Nodes restarted 2024-12-01T18:16:20,717 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-01T18:16:20,718 WARN [RS:0;b8365d49b74c:41671.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=5, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:20,718 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C41671%2C1733076966029:(num 1733076966414) roll requested 2024-12-01T18:16:20,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41671 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:16:20,719 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41671%2C1733076966029.1733076980718 2024-12-01T18:16:20,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41671 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53696 deadline: 1733076990718, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-01T18:16:20,724 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 newFile=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 2024-12-01T18:16:20,724 WARN [regionserver/b8365d49b74c:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-01T18:16:20,724 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 with entries=5, filesize=2.09 KB; new WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 2024-12-01T18:16:20,725 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44199:44199),(127.0.0.1/127.0.0.1:43107:43107)] 2024-12-01T18:16:20,725 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 is not closed yet, will try archiving it next time 2024-12-01T18:16:20,725 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:20,725 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:20,725 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 2024-12-01T18:16:20,725 WARN [IPC Server handler 4 on default port 38703 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1017 2024-12-01T18:16:20,726 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 after 1ms 2024-12-01T18:16:21,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:22,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:23,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:23,615 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1017: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-01T18:16:24,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:24,726 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 after 4001ms 2024-12-01T18:16:25,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:26,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:27,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:28,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:16:29,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:30,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:31,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:32,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:32,815 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-01T18:16:33,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:34,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:34,818 WARN [ResponseProcessor for block BP-572904914-172.17.0.2-1733076965265:blk_1073741839_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-572904914-172.17.0.2-1733076965265:blk_1073741839_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:16:34,818 WARN [DataStreamer for file /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 block BP-572904914-172.17.0.2-1733076965265:blk_1073741839_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-572904914-172.17.0.2-1733076965265:blk_1073741839_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33809,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK], DatanodeInfoWithStorage[127.0.0.1:38891,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33809,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]) is bad. 2024-12-01T18:16:34,819 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1000386064_22 at /127.0.0.1:38960 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38960 dst: /127.0.0.1:33809 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-01T18:16:34,819 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1000386064_22 at /127.0.0.1:60162 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:38891:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60162 dst: /127.0.0.1:38891 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:34,820 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@11f0c6f9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:34,821 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e8e93a4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:16:34,821 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:16:34,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66f09d11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:16:34,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3deee880{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,STOPPED} 2024-12-01T18:16:34,823 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:16:34,823 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:16:34,823 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:16:34,823 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-572904914-172.17.0.2-1733076965265 (Datanode Uuid 20fd19ec-4c9e-4592-982f-e43420840404) service to localhost/127.0.0.1:38703 2024-12-01T18:16:34,824 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data1/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:34,824 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data2/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:34,824 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:16:34,834 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:34,839 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:16:34,840 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:16:34,840 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:16:34,840 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:16:34,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d99b89f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:16:34,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bc459ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:16:34,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44dc177c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/java.io.tmpdir/jetty-localhost-45337-hadoop-hdfs-3_4_1-tests_jar-_-any-233709400818488236/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:34,960 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a5cbb85{HTTP/1.1, 
(http/1.1)}{localhost:45337} 2024-12-01T18:16:34,960 INFO [Time-limited test {}] server.Server(415): Started @202253ms 2024-12-01T18:16:34,961 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:16:34,987 WARN [ResponseProcessor for block BP-572904914-172.17.0.2-1733076965265:blk_1073741839_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-572904914-172.17.0.2-1733076965265:blk_1073741839_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:34,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1000386064_22 at /127.0.0.1:35556 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:38891:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35556 dst: /127.0.0.1:38891 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-01T18:16:34,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4fd89e84{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:34,993 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@783c49af{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:16:34,993 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:16:34,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2070a2a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:16:34,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31be664c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,STOPPED} 2024-12-01T18:16:34,995 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:16:34,995 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-01T18:16:34,995 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-572904914-172.17.0.2-1733076965265 (Datanode Uuid c52e09f9-85ef-4b8b-ac10-26eb94a71d50) service to localhost/127.0.0.1:38703 2024-12-01T18:16:34,995 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:16:34,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data3/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:34,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data4/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:34,996 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:16:35,013 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:35,016 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:16:35,017 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:16:35,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:16:35,018 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:16:35,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71144b43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:16:35,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32bf2b88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:16:35,075 WARN [Thread-1167 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:16:35,077 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f1121d7d41d36bc with lease ID 0xb2c22b15c708e83f: from storage DS-93693323-6788-4ac9-a6b2-0b88f3f20111 node DatanodeRegistration(127.0.0.1:46407, datanodeUuid=20fd19ec-4c9e-4592-982f-e43420840404, infoPort=34479, infoSecurePort=0, ipcPort=35183, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:35,078 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f1121d7d41d36bc with lease ID 0xb2c22b15c708e83f: from storage DS-1d805745-55e5-42bd-b5cc-8f9549db2ff8 node DatanodeRegistration(127.0.0.1:46407, datanodeUuid=20fd19ec-4c9e-4592-982f-e43420840404, infoPort=34479, infoSecurePort=0, ipcPort=35183, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 8, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T18:16:35,143 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4219ce32{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/java.io.tmpdir/jetty-localhost-43153-hadoop-hdfs-3_4_1-tests_jar-_-any-7151236977910031162/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:35,144 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@463f37b2{HTTP/1.1, (http/1.1)}{localhost:43153} 2024-12-01T18:16:35,144 INFO [Time-limited test {}] server.Server(415): Started @202437ms 2024-12-01T18:16:35,145 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-01T18:16:35,235 WARN [Thread-1198 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:16:35,238 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf7c8845379f0e217 with lease ID 0xb2c22b15c708e840: from storage DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d node DatanodeRegistration(127.0.0.1:45755, datanodeUuid=c52e09f9-85ef-4b8b-ac10-26eb94a71d50, infoPort=35219, infoSecurePort=0, ipcPort=34917, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:35,239 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf7c8845379f0e217 with lease ID 0xb2c22b15c708e840: from storage DS-3190f951-5c20-4004-81fa-2588c06a618b node DatanodeRegistration(127.0.0.1:45755, datanodeUuid=c52e09f9-85ef-4b8b-ac10-26eb94a71d50, infoPort=35219, infoSecurePort=0, ipcPort=34917, storageInfo=lv=-57;cid=testClusterID;nsid=1243741022;c=1733076965265), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T18:16:35,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:16:35,938 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T18:16:36,165 INFO [Time-limited test {}] wal.TestLogRolling(366): Data Nodes restarted 2024-12-01T18:16:36,167 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-01T18:16:36,168 WARN [RS:0;b8365d49b74c:41671.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=8, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38891,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:36,168 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C41671%2C1733076966029:(num 1733076980718) roll requested 2024-12-01T18:16:36,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41671 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38891,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:16:36,169 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41671%2C1733076966029.1733076996169 2024-12-01T18:16:36,169 WARN [master/b8365d49b74c:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=95, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:36,169 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C46671%2C1733076965961:(num 1733076966110) roll requested 2024-12-01T18:16:36,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41671 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53696 deadline: 1733077006168, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL 2024-12-01T18:16:36,169 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:16:36,169 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C46671%2C1733076965961.1733076996169 2024-12-01T18:16:36,169 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:16:36,177 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 newFile=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076996169 2024-12-01T18:16:36,177 WARN [regionserver/b8365d49b74c:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL 2024-12-01T18:16:36,178 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL 2024-12-01T18:16:36,178 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076996169 2024-12-01T18:16:36,178 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961/b8365d49b74c%2C46671%2C1733076965961.1733076966110 with entries=92, filesize=45.99 KB; new WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961/b8365d49b74c%2C46671%2C1733076965961.1733076996169 2024-12-01T18:16:36,178 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34479:34479),(127.0.0.1/127.0.0.1:35219:35219)] 2024-12-01T18:16:36,178 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 is not closed yet, will try archiving it next time 2024-12-01T18:16:36,178 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38891,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:16:36,178 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35219:35219),(127.0.0.1/127.0.0.1:34479:34479)] 2024-12-01T18:16:36,178 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961/b8365d49b74c%2C46671%2C1733076965961.1733076966110 is not closed yet, will try archiving it next time 2024-12-01T18:16:36,178 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38891,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:36,178 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:36,178 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 2024-12-01T18:16:36,178 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:16:36,178 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961/b8365d49b74c%2C46671%2C1733076965961.1733076966110 2024-12-01T18:16:36,178 WARN [IPC Server handler 0 on default port 38703 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 has not been closed. Lease recovery is in progress. RecoveryId = 1023 for block blk_1073741839_1020 2024-12-01T18:16:36,179 WARN [IPC Server handler 4 on default port 38703 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961/b8365d49b74c%2C46671%2C1733076965961.1733076966110 has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741830_1015 2024-12-01T18:16:36,179 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 after 1ms 2024-12-01T18:16:36,179 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961/b8365d49b74c%2C46671%2C1733076965961.1733076966110 after 1ms 2024-12-01T18:16:36,260 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:16:36,262 INFO [RS-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:16:36,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:37,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:38,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:39,078 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741839_1020: GenerationStamp not matched, existing replica is blk_1073741839_1018 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-01T18:16:39,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:40,179 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 after 4001ms 2024-12-01T18:16:40,179 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961/b8365d49b74c%2C46671%2C1733076965961.1733076966110 after 4001ms 2024-12-01T18:16:40,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:41,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:16:42,239 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-01T18:16:42,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:43,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:44,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:45,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:16:46,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:47,326 INFO [master/b8365d49b74c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-01T18:16:47,326 INFO [master/b8365d49b74c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-01T18:16:47,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:48,193 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41671%2C1733076966029.1733077008193 2024-12-01T18:16:48,199 DEBUG [Time-limited test {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076996169 newFile=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 2024-12-01T18:16:48,200 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076996169 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 2024-12-01T18:16:48,200 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35219:35219),(127.0.0.1/127.0.0.1:34479:34479)] 2024-12-01T18:16:48,200 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076996169 is not closed yet, will try archiving it next time 2024-12-01T18:16:48,201 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 2024-12-01T18:16:48,201 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 2024-12-01T18:16:48,201 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 after 0ms 2024-12-01T18:16:48,201 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 2024-12-01T18:16:48,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45755 is added to blk_1073741840_1021 (size=1264) 2024-12-01T18:16:48,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741840_1021 (size=1264) 2024-12-01T18:16:48,210 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733076967172/Put/vlen=162/seqid=0] 2024-12-01T18:16:48,210 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [default/info:d/1733076967232/Put/vlen=9/seqid=0] 2024-12-01T18:16:48,211 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #5: [hbase/info:d/1733076967253/Put/vlen=7/seqid=0] 2024-12-01T18:16:48,211 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733076967713/Put/vlen=218/seqid=0] 2024-12-01T18:16:48,211 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [row1002/info:/1733076977372/Put/vlen=1045/seqid=0] 2024-12-01T18:16:48,211 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076966414 2024-12-01T18:16:48,211 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 2024-12-01T18:16:48,211 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 2024-12-01T18:16:48,212 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 after 1ms 2024-12-01T18:16:48,212 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 2024-12-01T18:16:48,215 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #6: [row1003/info:/1733076990812/Put/vlen=1045/seqid=0] 2024-12-01T18:16:48,216 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #7: [row1004/info:/1733076992816/Put/vlen=1045/seqid=0] 2024-12-01T18:16:48,216 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076980718 2024-12-01T18:16:48,216 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for 
hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076996169 2024-12-01T18:16:48,216 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076996169 2024-12-01T18:16:48,216 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076996169 after 0ms 2024-12-01T18:16:48,216 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733076996169 2024-12-01T18:16:48,220 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #9: [row1005/info:/1733077006191/Put/vlen=1045/seqid=0] 2024-12-01T18:16:48,220 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 2024-12-01T18:16:48,220 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 2024-12-01T18:16:48,221 WARN [IPC Server handler 2 on default port 38703 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741842_1025 2024-12-01T18:16:48,221 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 after 1ms 2024-12-01T18:16:48,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:49,082 WARN [ResponseProcessor for block BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:49,082 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1819056780_22 at /127.0.0.1:35746 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:45755:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35746 dst: /127.0.0.1:45755 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45755 remote=/127.0.0.1:35746]. Total timeout mills is 60000, 59116 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:49,082 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1819056780_22 at /127.0.0.1:51434 [Receiving block BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:46407:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51434 dst: /127.0.0.1:46407 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:16:49,082 WARN [DataStreamer for file /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 block BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45755,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK], DatanodeInfoWithStorage[127.0.0.1:46407,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45755,DS-6668352a-ba38-4cbb-b2b8-bd577d64d70d,DISK]) is bad. 
2024-12-01T18:16:49,087 WARN [DataStreamer for file /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 block BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:16:49,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45755 is added to blk_1073741842_1026 (size=85) 2024-12-01T18:16:49,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:50,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:51,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:16:52,222 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 after 4002ms 2024-12-01T18:16:52,222 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 2024-12-01T18:16:52,226 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 2024-12-01T18:16:52,227 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 55a8e2f83dea527a3f6dc58337aa0768 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-01T18:16:52,227 WARN [RS:0;b8365d49b74c:41671.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=7, requesting roll of WAL org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:52,228 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C41671%2C1733076966029:(num 1733077008193) roll requested 2024-12-01T18:16:52,228 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 55a8e2f83dea527a3f6dc58337aa0768: 2024-12-01T18:16:52,228 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41671%2C1733076966029.1733077012228 2024-12-01T18:16:52,228 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:16:52,228 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing f0c7eee6eaa95dc6e646f1875aa0645d 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-01T18:16:52,229 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for f0c7eee6eaa95dc6e646f1875aa0645d: 2024-12-01T18:16:52,229 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:52,229 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.42 KB 2024-12-01T18:16:52,229 WARN [RS_OPEN_META-regionserver/b8365d49b74c:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:52,230 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-01T18:16:52,230 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:52,232 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-01T18:16:52,232 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-01T18:16:52,233 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65f88687 to 127.0.0.1:56873 2024-12-01T18:16:52,233 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:16:52,233 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T18:16:52,233 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=950860074, stopped=false 2024-12-01T18:16:52,233 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b8365d49b74c,46671,1733076965961 2024-12-01T18:16:52,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:16:52,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:52,235 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-01T18:16:52,235 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:16:52,235 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,41671,1733076966029' ***** 2024-12-01T18:16:52,235 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-01T18:16:52,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:16:52,236 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:16:52,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:52,236 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:16:52,236 INFO [RS:0;b8365d49b74c:41671 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T18:16:52,236 INFO [RS:0;b8365d49b74c:41671 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-01T18:16:52,236 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-01T18:16:52,236 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(3579): Received CLOSE for 55a8e2f83dea527a3f6dc58337aa0768 2024-12-01T18:16:52,236 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 newFile=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077012228 2024-12-01T18:16:52,236 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:16:52,237 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(3579): Received CLOSE for f0c7eee6eaa95dc6e646f1875aa0645d 2024-12-01T18:16:52,237 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,41671,1733076966029 2024-12-01T18:16:52,237 DEBUG [RS:0;b8365d49b74c:41671 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:16:52,237 WARN [regionserver/b8365d49b74c:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL 2024-12-01T18:16:52,237 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:16:52,237 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 55a8e2f83dea527a3f6dc58337aa0768, disabling compactions & flushes 2024-12-01T18:16:52,237 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077012228 2024-12-01T18:16:52,237 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:16:52,237 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T18:16:52,237 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 2024-12-01T18:16:52,237 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-01T18:16:52,237 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 
2024-12-01T18:16:52,237 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35219:35219),(127.0.0.1/127.0.0.1:34479:34479)] 2024-12-01T18:16:52,237 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. after waiting 0 ms 2024-12-01T18:16:52,237 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 2024-12-01T18:16:52,237 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 is not closed yet, will try archiving it next time 2024-12-01T18:16:52,237 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 55a8e2f83dea527a3f6dc58337aa0768 1/1 column families, dataSize=78 B heapSize=728 B 2024-12-01T18:16:52,237 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b8365d49b74c%2C41671%2C1733076966029.meta:.meta(num 1733076966777) roll requested 2024-12-01T18:16:52,237 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-01T18:16:52,237 DEBUG [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1603): Online Regions={55a8e2f83dea527a3f6dc58337aa0768=hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768., f0c7eee6eaa95dc6e646f1875aa0645d=TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d., 1588230740=hbase:meta,,1.1588230740} 2024-12-01T18:16:52,237 WARN [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-01T18:16:52,237 DEBUG [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 55a8e2f83dea527a3f6dc58337aa0768, f0c7eee6eaa95dc6e646f1875aa0645d 2024-12-01T18:16:52,237 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:16:52,237 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:16:52,237 INFO [regionserver/b8365d49b74c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41671%2C1733076966029.meta.1733077012237.meta 2024-12-01T18:16:52,237 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:16:52,237 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:16:52,238 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:16:52,238 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:16:52,238 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.89 KB 2024-12-01T18:16:52,238 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-572904914-172.17.0.2-1733076965265:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:52,238 WARN [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-01T18:16:52,238 WARN [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-01T18:16:52,238 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 2024-12-01T18:16:52,238 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:16:52,238 ERROR [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2808): ***** ABORTING region server b8365d49b74c,41671,1733076966029: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:52,238 ERROR [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-01T18:16:52,238 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-01T18:16:52,239 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 after 0ms 2024-12-01T18:16:52,239 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.1733077008193 to hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/oldWALs/b8365d49b74c%2C41671%2C1733076966029.1733077008193 2024-12-01T18:16:52,239 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-01T18:16:52,240 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-01T18:16:52,240 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-01T18:16:52,240 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 634831424 }, "NonHeapMemoryUsage": { "committed": 168886272, "init": 7667712, "max": -1, "used": 167048632 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-12-01T18:16:52,240 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46671 {}] master.MasterRpcServices(626): b8365d49b74c,41671,1733076966029 reported a fatal error: ***** ABORTING region server b8365d49b74c,41671,1733076966029: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** Cause: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) 2024-12-01T18:16:52,249 WARN [regionserver/b8365d49b74c:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-01T18:16:52,249 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.meta.1733076966777.meta with entries=11, filesize=3.66 KB; new WAL /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.meta.1733077012237.meta 2024-12-01T18:16:52,250 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34479:34479),(127.0.0.1/127.0.0.1:35219:35219)] 2024-12-01T18:16:52,250 DEBUG [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.meta.1733076966777.meta is not closed yet, will try archiving it next time 2024-12-01T18:16:52,250 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-01T18:16:52,250 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41499,DS-93693323-6788-4ac9-a6b2-0b88f3f20111,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-01T18:16:52,250 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.meta.1733076966777.meta 2024-12-01T18:16:52,250 WARN [IPC Server handler 4 on default port 38703 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.meta.1733076966777.meta has not been closed. Lease recovery is in progress. RecoveryId = 1029 for block blk_1073741834_1016 2024-12-01T18:16:52,251 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.meta.1733076966777.meta after 1ms 2024-12-01T18:16:52,259 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/namespace/55a8e2f83dea527a3f6dc58337aa0768/.tmp/info/dad5d8755c1b4e0b9ef5be9961102e73 is 45, key is default/info:d/1733076967232/Put/seqid=0 2024-12-01T18:16:52,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45755 is added to blk_1073741845_1030 (size=5037) 2024-12-01T18:16:52,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741845_1030 (size=5037) 2024-12-01T18:16:52,265 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/namespace/55a8e2f83dea527a3f6dc58337aa0768/.tmp/info/dad5d8755c1b4e0b9ef5be9961102e73 2024-12-01T18:16:52,272 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/namespace/55a8e2f83dea527a3f6dc58337aa0768/.tmp/info/dad5d8755c1b4e0b9ef5be9961102e73 as hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/namespace/55a8e2f83dea527a3f6dc58337aa0768/info/dad5d8755c1b4e0b9ef5be9961102e73 2024-12-01T18:16:52,276 INFO [regionserver/b8365d49b74c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-01T18:16:52,276 INFO [regionserver/b8365d49b74c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-01T18:16:52,277 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/namespace/55a8e2f83dea527a3f6dc58337aa0768/info/dad5d8755c1b4e0b9ef5be9961102e73, entries=2, sequenceid=8, filesize=4.9 K 2024-12-01T18:16:52,278 INFO [regionserver/b8365d49b74c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:16:52,278 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 55a8e2f83dea527a3f6dc58337aa0768 in 41ms, sequenceid=8, compaction requested=false 2024-12-01T18:16:52,282 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/data/hbase/namespace/55a8e2f83dea527a3f6dc58337aa0768/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-01T18:16:52,283 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 2024-12-01T18:16:52,283 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 55a8e2f83dea527a3f6dc58337aa0768: 2024-12-01T18:16:52,283 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733076966819.55a8e2f83dea527a3f6dc58337aa0768. 2024-12-01T18:16:52,283 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing f0c7eee6eaa95dc6e646f1875aa0645d, disabling compactions & flushes 2024-12-01T18:16:52,283 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:52,283 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:52,283 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. after waiting 0 ms 2024-12-01T18:16:52,283 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:52,283 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for f0c7eee6eaa95dc6e646f1875aa0645d: 2024-12-01T18:16:52,283 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 
2024-12-01T18:16:52,438 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(3579): Received CLOSE for f0c7eee6eaa95dc6e646f1875aa0645d 2024-12-01T18:16:52,438 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-01T18:16:52,438 DEBUG [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, f0c7eee6eaa95dc6e646f1875aa0645d 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing f0c7eee6eaa95dc6e646f1875aa0645d, disabling compactions & flushes 2024-12-01T18:16:52,438 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:52,438 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. after waiting 0 ms 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for f0c7eee6eaa95dc6e646f1875aa0645d: 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnPipelineRestart,,1733076967360.f0c7eee6eaa95dc6e646f1875aa0645d. 2024-12-01T18:16:52,438 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2803): Abort already in progress. 
Ignoring the current request with reason: Unrecoverable exception while closing hbase:meta,,1.1588230740 2024-12-01T18:16:52,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:52,638 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing 2024-12-01T18:16:52,638 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,41671,1733076966029; all regions closed. 
2024-12-01T18:16:52,639 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029 2024-12-01T18:16:52,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45755 is added to blk_1073741844_1028 (size=93) 2024-12-01T18:16:52,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741844_1028 (size=93) 2024-12-01T18:16:53,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:54,239 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1016: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-01T18:16:54,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:55,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:56,251 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029/b8365d49b74c%2C41671%2C1733076966029.meta.1733076966777.meta after 4001ms 2024-12-01T18:16:56,252 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/WALs/b8365d49b74c,41671,1733076966029 2024-12-01T18:16:56,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741843_1027 (size=910) 2024-12-01T18:16:56,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45755 is added to blk_1073741843_1027 (size=910) 2024-12-01T18:16:56,255 DEBUG [RS:0;b8365d49b74c:41671 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:16:56,255 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:16:56,255 INFO [RS:0;b8365d49b74c:41671 {}] hbase.ChoreService(370): Chore service for: regionserver/b8365d49b74c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-01T18:16:56,255 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
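The repeated WARN stack traces above all come from the same retry loop: RecoverLeaseFSUtils keeps probing the NameNode about the old WAL's lease, and each probe on the 41701 cluster fails with "Filesystem closed" because that DFSClient has already been shut down (the InvocationTargetException wrapper appears because isFileClosed is invoked reflectively). The loop for the other WAL eventually succeeds ("Recovered lease, attempt=1 ... after 4001ms"). The following is a minimal, hedged sketch of that retry pattern using the public DistributedFileSystem API directly; it is not the actual HBase RecoverLeaseFSUtils implementation, and the method and class names are illustrative only:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {

      /**
       * Keep asking the NameNode to recover the lease on an old WAL until it
       * either succeeds or the file is reported closed. Failed probes (for
       * example "Filesystem closed") are logged and retried, which is what
       * produces the repeated WARN lines in the log above.
       */
      static void recoverLease(FileSystem fs, Path wal, long pauseMs, int maxAttempts)
          throws IOException, InterruptedException {
        if (!(fs instanceof DistributedFileSystem)) {
          return; // lease recovery only applies to HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          try {
            if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
              System.out.println("Recovered lease, attempt=" + attempt + " on file=" + wal);
              return;
            }
          } catch (IOException e) {
            // e.g. java.io.IOException: Filesystem closed, as in the WARN lines above
            System.out.println("Failed invocation for " + wal + ": " + e);
          }
          Thread.sleep(pauseMs); // back off before the next probe
        }
        throw new IOException("Could not recover lease on " + wal);
      }
    }
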
2024-12-01T18:16:56,255 INFO [RS:0;b8365d49b74c:41671 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41671 2024-12-01T18:16:56,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b8365d49b74c,41671,1733076966029 2024-12-01T18:16:56,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:16:56,258 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b8365d49b74c,41671,1733076966029] 2024-12-01T18:16:56,258 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b8365d49b74c,41671,1733076966029; numProcessing=1 2024-12-01T18:16:56,260 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b8365d49b74c,41671,1733076966029 already deleted, retry=false 2024-12-01T18:16:56,260 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b8365d49b74c,41671,1733076966029 expired; onlineServers=0 2024-12-01T18:16:56,260 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,46671,1733076965961' ***** 2024-12-01T18:16:56,260 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T18:16:56,260 DEBUG [M:0;b8365d49b74c:46671 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@941d80d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:16:56,260 INFO [M:0;b8365d49b74c:46671 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,46671,1733076965961 2024-12-01T18:16:56,260 INFO [M:0;b8365d49b74c:46671 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,46671,1733076965961; all regions closed. 2024-12-01T18:16:56,260 DEBUG [M:0;b8365d49b74c:46671 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:16:56,260 DEBUG [M:0;b8365d49b74c:46671 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T18:16:56,260 DEBUG [M:0;b8365d49b74c:46671 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T18:16:56,260 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
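The ZKWatcher and RegionServerTracker lines above show the shutdown handshake: the region server's ephemeral znode under /hbase/rs disappears, the master's watcher receives a NodeDeleted event, and the tracker processes that server's expiration. A hedged sketch of such a watcher, written against the plain ZooKeeper client API rather than HBase's actual RegionServerTracker (class and constant names are illustrative), might look like:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;

    final class RsEphemeralNodeWatcherSketch implements Watcher {

      private static final String RS_ZNODE_PARENT = "/hbase/rs/"; // matches the paths in the log

      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted
            && event.getPath() != null
            && event.getPath().startsWith(RS_ZNODE_PARENT)) {
          // The deleted ephemeral znode names the expired server,
          // e.g. b8365d49b74c,41671,1733076966029 in the log above.
          String serverName = event.getPath().substring(RS_ZNODE_PARENT.length());
          System.out.println("RegionServer ephemeral node deleted, processing expiration ["
              + serverName + "]");
        }
      }
    }
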
2024-12-01T18:16:56,260 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733076966170 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733076966170,5,FailOnTimeoutGroup] 2024-12-01T18:16:56,260 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733076966170 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733076966170,5,FailOnTimeoutGroup] 2024-12-01T18:16:56,261 INFO [M:0;b8365d49b74c:46671 {}] hbase.ChoreService(370): Chore service for: master/b8365d49b74c:0 had [] on shutdown 2024-12-01T18:16:56,261 DEBUG [M:0;b8365d49b74c:46671 {}] master.HMaster(1733): Stopping service threads 2024-12-01T18:16:56,261 INFO [M:0;b8365d49b74c:46671 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T18:16:56,261 INFO [M:0;b8365d49b74c:46671 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T18:16:56,261 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-01T18:16:56,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T18:16:56,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:56,262 DEBUG [M:0;b8365d49b74c:46671 {}] zookeeper.ZKUtil(347): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T18:16:56,262 WARN [M:0;b8365d49b74c:46671 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T18:16:56,262 INFO [M:0;b8365d49b74c:46671 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-01T18:16:56,262 INFO [M:0;b8365d49b74c:46671 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T18:16:56,262 DEBUG [M:0;b8365d49b74c:46671 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:16:56,262 INFO [M:0;b8365d49b74c:46671 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:56,262 DEBUG [M:0;b8365d49b74c:46671 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:56,262 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:16:56,262 DEBUG [M:0;b8365d49b74c:46671 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:16:56,262 DEBUG [M:0;b8365d49b74c:46671 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T18:16:56,262 INFO [M:0;b8365d49b74c:46671 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.09 KB heapSize=49.26 KB 2024-12-01T18:16:56,281 DEBUG [M:0;b8365d49b74c:46671 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2b7452adc1b84c57b73fdb5ddbba6d70 is 82, key is hbase:meta,,1/info:regioninfo/1733076966799/Put/seqid=0 2024-12-01T18:16:56,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741846_1031 (size=5672) 2024-12-01T18:16:56,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45755 is added to blk_1073741846_1031 (size=5672) 2024-12-01T18:16:56,287 INFO [M:0;b8365d49b74c:46671 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2b7452adc1b84c57b73fdb5ddbba6d70 2024-12-01T18:16:56,308 DEBUG [M:0;b8365d49b74c:46671 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/739a9f6c29314ec7b27a46f7d9ec7026 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733076967728/Put/seqid=0 2024-12-01T18:16:56,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45755 is added to blk_1073741847_1032 (size=7469) 2024-12-01T18:16:56,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741847_1032 (size=7469) 2024-12-01T18:16:56,314 INFO [M:0;b8365d49b74c:46671 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.49 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/739a9f6c29314ec7b27a46f7d9ec7026 2024-12-01T18:16:56,334 DEBUG [M:0;b8365d49b74c:46671 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d3cc6197d3d447f290fdeed6beb86606 is 69, key is b8365d49b74c,41671,1733076966029/rs:state/1733076966264/Put/seqid=0 2024-12-01T18:16:56,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741848_1033 (size=5156) 2024-12-01T18:16:56,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45755 is added to blk_1073741848_1033 (size=5156) 2024-12-01T18:16:56,341 INFO [M:0;b8365d49b74c:46671 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d3cc6197d3d447f290fdeed6beb86606 2024-12-01T18:16:56,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:16:56,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x1004ecb931b0001, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:16:56,359 INFO [RS:0;b8365d49b74c:41671 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,41671,1733076966029; zookeeper connection closed. 2024-12-01T18:16:56,359 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@40fc47e6 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@40fc47e6 2024-12-01T18:16:56,359 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-01T18:16:56,367 DEBUG [M:0;b8365d49b74c:46671 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4157a9352384417b8c6c5266f756fea0 is 52, key is load_balancer_on/state:d/1733076967354/Put/seqid=0 2024-12-01T18:16:56,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741849_1034 (size=5056) 2024-12-01T18:16:56,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45755 is added to blk_1073741849_1034 (size=5056) 2024-12-01T18:16:56,373 INFO [M:0;b8365d49b74c:46671 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4157a9352384417b8c6c5266f756fea0 2024-12-01T18:16:56,379 DEBUG [M:0;b8365d49b74c:46671 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2b7452adc1b84c57b73fdb5ddbba6d70 as hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2b7452adc1b84c57b73fdb5ddbba6d70 2024-12-01T18:16:56,385 INFO [M:0;b8365d49b74c:46671 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2b7452adc1b84c57b73fdb5ddbba6d70, entries=8, sequenceid=96, filesize=5.5 K 2024-12-01T18:16:56,386 DEBUG [M:0;b8365d49b74c:46671 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/739a9f6c29314ec7b27a46f7d9ec7026 as hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/739a9f6c29314ec7b27a46f7d9ec7026 2024-12-01T18:16:56,391 INFO [M:0;b8365d49b74c:46671 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/739a9f6c29314ec7b27a46f7d9ec7026, 
entries=11, sequenceid=96, filesize=7.3 K 2024-12-01T18:16:56,392 DEBUG [M:0;b8365d49b74c:46671 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d3cc6197d3d447f290fdeed6beb86606 as hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d3cc6197d3d447f290fdeed6beb86606 2024-12-01T18:16:56,397 INFO [M:0;b8365d49b74c:46671 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d3cc6197d3d447f290fdeed6beb86606, entries=1, sequenceid=96, filesize=5.0 K 2024-12-01T18:16:56,398 DEBUG [M:0;b8365d49b74c:46671 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4157a9352384417b8c6c5266f756fea0 as hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4157a9352384417b8c6c5266f756fea0 2024-12-01T18:16:56,403 INFO [M:0;b8365d49b74c:46671 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38703/user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4157a9352384417b8c6c5266f756fea0, entries=1, sequenceid=96, filesize=4.9 K 2024-12-01T18:16:56,405 INFO [M:0;b8365d49b74c:46671 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.09 KB/41052, heapSize ~49.20 KB/50376, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=96, compaction requested=false 2024-12-01T18:16:56,406 INFO [M:0;b8365d49b74c:46671 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:56,406 DEBUG [M:0;b8365d49b74c:46671 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:16:56,406 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/5c52bc67-ea90-0b77-f709-a1b37bd2d355/MasterData/WALs/b8365d49b74c,46671,1733076965961 2024-12-01T18:16:56,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46407 is added to blk_1073741841_1022 (size=757) 2024-12-01T18:16:56,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45755 is added to blk_1073741841_1022 (size=757) 2024-12-01T18:16:56,409 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-01T18:16:56,409 INFO [M:0;b8365d49b74c:46671 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
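The flush sequence above follows the usual two-phase pattern: each column family's memstore snapshot is first written as a file under the region's .tmp directory, and only then does "Committing ... as ..." move it into the family directory (info, proc, rs, state), after which the store logs "Added ... entries=..., filesize=...". Below is a minimal sketch of that commit step against the generic Hadoop FileSystem API, with a simplified method name; it is not the real HRegionFileSystem code, which additionally validates the file and applies storage policy:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class StoreFileCommitSketch {

      /**
       * Move a flushed file from the region's .tmp directory into the target
       * column family directory. On HDFS the rename is a metadata-only
       * operation, so the commit is cheap regardless of file size.
       */
      static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path dst = new Path(familyDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {
          throw new IOException("Failed to commit " + tmpFile + " as " + dst);
        }
        return dst; // e.g. .../store/<region>/info/<hfile name> as in the log above
      }
    }
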
2024-12-01T18:16:56,409 INFO [M:0;b8365d49b74c:46671 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46671 2024-12-01T18:16:56,411 DEBUG [M:0;b8365d49b74c:46671 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b8365d49b74c,46671,1733076965961 already deleted, retry=false 2024-12-01T18:16:56,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:16:56,513 INFO [M:0;b8365d49b74c:46671 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,46671,1733076965961; zookeeper connection closed. 2024-12-01T18:16:56,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46671-0x1004ecb931b0000, quorum=127.0.0.1:56873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:16:56,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4219ce32{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:56,516 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@463f37b2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:16:56,516 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:16:56,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32bf2b88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:16:56,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71144b43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,STOPPED} 2024-12-01T18:16:56,518 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:16:56,518 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:16:56,518 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:16:56,518 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-572904914-172.17.0.2-1733076965265 (Datanode Uuid c52e09f9-85ef-4b8b-ac10-26eb94a71d50) service to localhost/127.0.0.1:38703 2024-12-01T18:16:56,519 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data3/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:56,519 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data4/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:56,519 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:16:56,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44dc177c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:56,521 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a5cbb85{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:16:56,521 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:16:56,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bc459ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:16:56,522 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d99b89f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,STOPPED} 2024-12-01T18:16:56,523 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:16:56,523 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:16:56,523 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:16:56,523 WARN [BP-572904914-172.17.0.2-1733076965265 heartbeating to localhost/127.0.0.1:38703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-572904914-172.17.0.2-1733076965265 (Datanode Uuid 20fd19ec-4c9e-4592-982f-e43420840404) service to localhost/127.0.0.1:38703 2024-12-01T18:16:56,524 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data1/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:56,524 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/cluster_ed1cd600-d909-e943-433e-9a17d8eca7e1/dfs/data/data2/current/BP-572904914-172.17.0.2-1733076965265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:16:56,525 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:16:56,531 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24cb120{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:16:56,532 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5c1f50f5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:16:56,532 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:16:56,532 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@611c74c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:16:56,532 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72060fdc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir/,STOPPED} 2024-12-01T18:16:56,538 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-01T18:16:56,554 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-01T18:16:56,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:56,562 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=100 (was 86) Potentially hanging thread: nioEventLoopGroup-27-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38703 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1454462474) connection to localhost/127.0.0.1:38703 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-27-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38703 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1454462474) connection to localhost/127.0.0.1:38703 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-29-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native 
Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1454462474) connection to localhost/127.0.0.1:38703 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38703 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38703 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:38703 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=444 (was 430) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=131 (was 171), ProcessCount=11 (was 11), AvailableMemoryMB=3192 (was 3434) 2024-12-01T18:16:56,569 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=100, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=131, ProcessCount=11, AvailableMemoryMB=3192 2024-12-01T18:16:56,569 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.log.dir so I do NOT create it in target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/dd3f442e-ed49-ef4c-0c66-b3b4a489f075/hadoop.tmp.dir so I do NOT create it in target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab, deleteOnExit=true 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/test.cache.data in system properties and HBase conf 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/hadoop.log.dir in system properties and HBase conf 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-01T18:16:56,570 DEBUG [Time-limited test {}] 
fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:16:56,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/nfs.dump.dir in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/java.io.tmpdir in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T18:16:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T18:16:56,584 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:16:56,650 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:56,654 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:16:56,656 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:16:56,656 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:16:56,656 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:16:56,657 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:56,657 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cc661a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:16:56,657 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@233e6490{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:16:56,773 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4bb512f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/java.io.tmpdir/jetty-localhost-39931-hadoop-hdfs-3_4_1-tests_jar-_-any-924672607530933931/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:16:56,774 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ccc3bba{HTTP/1.1, (http/1.1)}{localhost:39931} 2024-12-01T18:16:56,774 INFO [Time-limited test {}] server.Server(415): Started @224067ms 2024-12-01T18:16:56,787 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:16:56,852 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:56,855 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:16:56,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:16:56,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:16:56,856 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:16:56,856 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@676eacdf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:16:56,856 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d38e2e3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:16:56,971 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@73a57b63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/java.io.tmpdir/jetty-localhost-42719-hadoop-hdfs-3_4_1-tests_jar-_-any-2633138166395982843/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:56,972 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7158c214{HTTP/1.1, (http/1.1)}{localhost:42719} 2024-12-01T18:16:56,972 INFO [Time-limited test {}] server.Server(415): Started @224265ms 2024-12-01T18:16:56,973 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:16:57,011 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:16:57,018 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:16:57,019 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:16:57,019 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:16:57,019 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T18:16:57,020 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@152be98f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:16:57,020 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55215408{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:16:57,065 WARN [Thread-1369 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab/dfs/data/data2/current/BP-569714203-172.17.0.2-1733077016602/current, will proceed with Du for space computation calculation, 2024-12-01T18:16:57,065 WARN [Thread-1368 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab/dfs/data/data1/current/BP-569714203-172.17.0.2-1733077016602/current, will proceed with Du for space computation calculation, 2024-12-01T18:16:57,089 WARN [Thread-1347 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:16:57,092 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x179f982abaee0be5 with lease ID 0x42f37b5fdde55718: Processing first storage report for DS-9ce704c8-393e-4b44-92da-6d576b79fc71 from datanode DatanodeRegistration(127.0.0.1:42991, datanodeUuid=87bbe3d1-282f-4a3e-9baf-4faf058f58df, infoPort=39339, infoSecurePort=0, ipcPort=37373, storageInfo=lv=-57;cid=testClusterID;nsid=1917577208;c=1733077016602) 2024-12-01T18:16:57,092 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x179f982abaee0be5 with lease ID 0x42f37b5fdde55718: from storage DS-9ce704c8-393e-4b44-92da-6d576b79fc71 node DatanodeRegistration(127.0.0.1:42991, datanodeUuid=87bbe3d1-282f-4a3e-9baf-4faf058f58df, infoPort=39339, infoSecurePort=0, ipcPort=37373, storageInfo=lv=-57;cid=testClusterID;nsid=1917577208;c=1733077016602), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:57,092 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x179f982abaee0be5 with lease ID 0x42f37b5fdde55718: Processing first storage report for DS-6dc0f3a4-66ab-46b5-977f-1d47ad0ff8ce from datanode DatanodeRegistration(127.0.0.1:42991, datanodeUuid=87bbe3d1-282f-4a3e-9baf-4faf058f58df, infoPort=39339, infoSecurePort=0, ipcPort=37373, storageInfo=lv=-57;cid=testClusterID;nsid=1917577208;c=1733077016602) 2024-12-01T18:16:57,092 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x179f982abaee0be5 with lease ID 0x42f37b5fdde55718: from storage DS-6dc0f3a4-66ab-46b5-977f-1d47ad0ff8ce node DatanodeRegistration(127.0.0.1:42991, datanodeUuid=87bbe3d1-282f-4a3e-9baf-4faf058f58df, infoPort=39339, infoSecurePort=0, ipcPort=37373, storageInfo=lv=-57;cid=testClusterID;nsid=1917577208;c=1733077016602), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:57,147 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@34b4a513{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/java.io.tmpdir/jetty-localhost-42515-hadoop-hdfs-3_4_1-tests_jar-_-any-953230514377581248/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:16:57,147 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@706d0530{HTTP/1.1, (http/1.1)}{localhost:42515} 2024-12-01T18:16:57,147 INFO [Time-limited test {}] server.Server(415): Started @224441ms 2024-12-01T18:16:57,149 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
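The records up to this point show the test harness standing up a miniature HDFS cluster: the NameNode's Jetty web app, then one Jetty server per DataNode, and finally the first block reports being processed by the BlockManager. As a rough illustration of how a test produces this sequence, here is a minimal sketch using HBaseTestingUtility's mini-DFS support; the two-DataNode count matches this run, but the class name, the smoke-test mkdirs call, and its path are illustrative assumptions rather than code taken from this test.

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        // The utility first redirects the yarn.* / dfs.* directories (as logged
        // above) into its own test-data directory, then starts the mini cluster.
        HBaseTestingUtility util = new HBaseTestingUtility();
        MiniDFSCluster dfs = util.startMiniDFSCluster(2); // two DataNodes, as in this run
        dfs.waitActive();                                 // returns once block reports are in
        FileSystem fs = dfs.getFileSystem();
        fs.mkdirs(new Path("/smoke"));                    // illustrative check only
        util.shutdownMiniDFSCluster();
      }
    }

startMiniCluster() would go one step further and also bring up the ZooKeeper ensemble and the HBase master and region server whose startup appears later in this log.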
2024-12-01T18:16:57,230 WARN [Thread-1394 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab/dfs/data/data3/current/BP-569714203-172.17.0.2-1733077016602/current, will proceed with Du for space computation calculation, 2024-12-01T18:16:57,230 WARN [Thread-1395 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab/dfs/data/data4/current/BP-569714203-172.17.0.2-1733077016602/current, will proceed with Du for space computation calculation, 2024-12-01T18:16:57,255 WARN [Thread-1383 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:16:57,257 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7b93c6e6d221338 with lease ID 0x42f37b5fdde55719: Processing first storage report for DS-4bfbf1b9-08e7-4b73-ad41-baac569c56ff from datanode DatanodeRegistration(127.0.0.1:37113, datanodeUuid=24164c7c-2d49-4ce9-8b1a-ce5b6e8c32ea, infoPort=42467, infoSecurePort=0, ipcPort=41181, storageInfo=lv=-57;cid=testClusterID;nsid=1917577208;c=1733077016602) 2024-12-01T18:16:57,257 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7b93c6e6d221338 with lease ID 0x42f37b5fdde55719: from storage DS-4bfbf1b9-08e7-4b73-ad41-baac569c56ff node DatanodeRegistration(127.0.0.1:37113, datanodeUuid=24164c7c-2d49-4ce9-8b1a-ce5b6e8c32ea, infoPort=42467, infoSecurePort=0, ipcPort=41181, storageInfo=lv=-57;cid=testClusterID;nsid=1917577208;c=1733077016602), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:57,257 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7b93c6e6d221338 with lease ID 0x42f37b5fdde55719: Processing first storage report for DS-b47e0d61-a2e3-49d4-b0b9-62c4567ca1fa from datanode DatanodeRegistration(127.0.0.1:37113, datanodeUuid=24164c7c-2d49-4ce9-8b1a-ce5b6e8c32ea, infoPort=42467, infoSecurePort=0, ipcPort=41181, storageInfo=lv=-57;cid=testClusterID;nsid=1917577208;c=1733077016602) 2024-12-01T18:16:57,257 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7b93c6e6d221338 with lease ID 0x42f37b5fdde55719: from storage DS-b47e0d61-a2e3-49d4-b0b9-62c4567ca1fa node DatanodeRegistration(127.0.0.1:37113, datanodeUuid=24164c7c-2d49-4ce9-8b1a-ce5b6e8c32ea, infoPort=42467, infoSecurePort=0, ipcPort=41181, storageInfo=lv=-57;cid=testClusterID;nsid=1917577208;c=1733077016602), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:16:57,276 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34 2024-12-01T18:16:57,279 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab/zookeeper_0, clientPort=55440, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T18:16:57,280 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=55440 2024-12-01T18:16:57,280 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:57,282 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:57,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:16:57,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:16:57,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,306 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae with version=8 2024-12-01T18:16:57,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/hbase-staging 2024-12-01T18:16:57,309 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:16:57,309 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:57,310 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:57,310 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:16:57,310 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:57,310 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:16:57,310 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:16:57,310 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:16:57,311 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35169 2024-12-01T18:16:57,311 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:57,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,313 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:57,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,316 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35169 connecting to ZooKeeper ensemble=127.0.0.1:55440 2024-12-01T18:16:57,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:351690x0, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:16:57,323 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35169-0x1004ecc5ba50000 connected 2024-12-01T18:16:57,345 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:16:57,346 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:16:57,347 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:16:57,348 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35169 2024-12-01T18:16:57,348 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35169 2024-12-01T18:16:57,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35169 2024-12-01T18:16:57,356 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35169 2024-12-01T18:16:57,359 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35169 2024-12-01T18:16:57,360 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae, hbase.cluster.distributed=false 2024-12-01T18:16:57,385 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:16:57,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:57,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:57,385 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:16:57,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:16:57,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:16:57,385 INFO [Time-limited test {}] 
ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:16:57,386 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:16:57,387 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41713 2024-12-01T18:16:57,387 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:16:57,388 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:16:57,389 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:57,391 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:57,395 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41713 connecting to ZooKeeper ensemble=127.0.0.1:55440 2024-12-01T18:16:57,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:417130x0, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:16:57,398 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:417130x0, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:16:57,398 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41713-0x1004ecc5ba50001 connected 2024-12-01T18:16:57,399 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:16:57,399 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:16:57,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41713 2024-12-01T18:16:57,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41713 2024-12-01T18:16:57,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41713 2024-12-01T18:16:57,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41713 2024-12-01T18:16:57,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41713 2024-12-01T18:16:57,413 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b8365d49b74c,35169,1733077017309 2024-12-01T18:16:57,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:16:57,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:16:57,416 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b8365d49b74c,35169,1733077017309 2024-12-01T18:16:57,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:16:57,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:16:57,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,418 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:16:57,418 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b8365d49b74c,35169,1733077017309 from backup master directory 2024-12-01T18:16:57,419 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:16:57,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b8365d49b74c,35169,1733077017309 2024-12-01T18:16:57,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:16:57,420 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
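The preceding entries show the master connecting to the MiniZooKeeperCluster on 127.0.0.1:55440, setting watchers on znodes that do not exist yet (/hbase/master, /hbase/running, /hbase/acl), and then reacting to the NodeCreated and NodeChildrenChanged events as it registers itself as active master. A minimal sketch of the equivalent watch registration with a plain ZooKeeper client is below; the port and znode path are copied from the log, but the client code itself is an illustrative stand-in, not HBase's ZKUtil/ZKWatcher implementation.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        // Connect to the mini ensemble's client port from the log (55440).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55440", 30000, (WatchedEvent e) -> { });
        Watcher onMaster = (WatchedEvent e) ->
            System.out.println("got " + e.getType() + " for " + e.getPath());
        // exists() registers the watch even when the znode is absent, which is the
        // "Set watcher on znode that does not yet exist" case above; the one-shot
        // watcher fires when /hbase/master is later created or deleted.
        zk.exists("/hbase/master", onMaster);
        Thread.sleep(1000); // toy example: give the watch a moment before closing
        zk.close();
      }
    }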
2024-12-01T18:16:57,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:16:57,420 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b8365d49b74c,35169,1733077017309 2024-12-01T18:16:57,427 DEBUG [M:0;b8365d49b74c:35169 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b8365d49b74c:35169 2024-12-01T18:16:57,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:16:57,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:16:57,434 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/hbase.id with ID: 15b47af1-03a5-4367-8a81-81a922f4f6da 2024-12-01T18:16:57,444 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:57,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:16:57,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:16:57,454 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:16:57,455 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T18:16:57,455 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:16:57,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:16:57,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:16:57,464 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store 2024-12-01T18:16:57,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:16:57,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:16:57,471 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:57,471 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & 
flushes 2024-12-01T18:16:57,471 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:57,471 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:57,471 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:16:57,471 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:57,471 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:16:57,471 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:16:57,472 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/.initializing 2024-12-01T18:16:57,472 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/WALs/b8365d49b74c,35169,1733077017309 2024-12-01T18:16:57,475 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C35169%2C1733077017309, suffix=, logDir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/WALs/b8365d49b74c,35169,1733077017309, archiveDir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/oldWALs, maxLogs=10 2024-12-01T18:16:57,475 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C35169%2C1733077017309.1733077017475 2024-12-01T18:16:57,479 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/WALs/b8365d49b74c,35169,1733077017309/b8365d49b74c%2C35169%2C1733077017309.1733077017475 2024-12-01T18:16:57,480 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42467:42467),(127.0.0.1/127.0.0.1:39339:39339)] 2024-12-01T18:16:57,480 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:16:57,480 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:57,480 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:57,480 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 
1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:57,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:57,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T18:16:57,483 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:57,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:57,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:57,484 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T18:16:57,485 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:57,485 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:16:57,485 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:57,486 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T18:16:57,486 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:57,487 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:16:57,487 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:57,488 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T18:16:57,488 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:57,488 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:16:57,489 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:57,489 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:57,491 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T18:16:57,492 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:16:57,494 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:16:57,495 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=814825, jitterRate=0.036103859543800354}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:16:57,495 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:16:57,496 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T18:16:57,499 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76fd512e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:16:57,499 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-01T18:16:57,499 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T18:16:57,499 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T18:16:57,500 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
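The span above covers the master creating its local 'master:store' region: a table descriptor with four column families (info, proc, rs, state) and per-family tuning (three versions, in-memory, 8 KB blocks, ROW_INDEX_V1 encoding and a ROWCOL bloom filter for info; library defaults elsewhere), followed by an FSHLog WAL and the HRegion being opened over it. For reference, here is a sketch of expressing that style of per-family tuning with the HBase 2.x builder API; the table name "demo" and the reduced family set are illustrative assumptions, and this is not the internal MasterRegion code path.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // "info"-style family: 3 versions, in-memory, 8 KB blocks, ROW_INDEX_V1, ROWCOL bloom.
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            // "proc"-style family: defaults (1 version, 64 KB blocks, ROW bloom).
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .build();
        System.out.println(td);
      }
    }

A client would hand such a descriptor to Admin.createTable(); the master:store region in this log is instead created internally by MasterRegion, which is why no Admin call appears here.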
2024-12-01T18:16:57,500 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-01T18:16:57,500 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-01T18:16:57,500 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T18:16:57,502 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-01T18:16:57,503 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T18:16:57,504 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-01T18:16:57,504 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T18:16:57,504 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T18:16:57,505 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-01T18:16:57,506 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T18:16:57,506 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T18:16:57,507 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-01T18:16:57,508 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T18:16:57,508 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T18:16:57,510 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T18:16:57,511 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T18:16:57,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/running 2024-12-01T18:16:57,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:16:57,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,512 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b8365d49b74c,35169,1733077017309, sessionid=0x1004ecc5ba50000, setting cluster-up flag (Was=false) 2024-12-01T18:16:57,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,518 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T18:16:57,519 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,35169,1733077017309 2024-12-01T18:16:57,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,525 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T18:16:57,526 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,35169,1733077017309 2024-12-01T18:16:57,528 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-01T18:16:57,528 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-01T18:16:57,528 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, 
TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-01T18:16:57,528 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b8365d49b74c,35169,1733077017309 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T18:16:57,529 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:16:57,529 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:16:57,529 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:16:57,529 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:16:57,529 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b8365d49b74c:0, corePoolSize=10, maxPoolSize=10 2024-12-01T18:16:57,529 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,529 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:16:57,529 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,530 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733077047530 2024-12-01T18:16:57,530 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T18:16:57,530 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T18:16:57,530 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T18:16:57,530 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T18:16:57,530 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T18:16:57,530 INFO 
[master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T18:16:57,530 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:57,530 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:16:57,531 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-01T18:16:57,531 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T18:16:57,531 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T18:16:57,531 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T18:16:57,531 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T18:16:57,531 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T18:16:57,531 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733077017531,5,FailOnTimeoutGroup] 2024-12-01T18:16:57,531 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:57,531 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733077017531,5,FailOnTimeoutGroup] 2024-12-01T18:16:57,532 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:57,532 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T18:16:57,532 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:57,532 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-01T18:16:57,532 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:16:57,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:16:57,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:16:57,541 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-01T18:16:57,541 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae 2024-12-01T18:16:57,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741832_1008 (size=32) 
2024-12-01T18:16:57,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:16:57,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:57,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:16:57,550 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:16:57,550 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:57,551 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:57,551 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:16:57,552 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:16:57,552 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:57,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-01T18:16:57,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:16:57,554 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:16:57,554 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:57,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:57,555 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740 2024-12-01T18:16:57,556 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740 2024-12-01T18:16:57,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-01T18:16:57,557 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-01T18:16:57,558 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:16:57,560 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:16:57,561 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709647, jitterRate=-0.09763778746128082}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:16:57,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:16:57,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:16:57,561 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:16:57,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:16:57,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:16:57,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:16:57,561 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-01T18:16:57,562 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:16:57,562 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:16:57,563 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-01T18:16:57,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T18:16:57,564 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T18:16:57,565 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264):
Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T18:16:57,626 DEBUG [RS:0;b8365d49b74c:41713 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b8365d49b74c:41713 2024-12-01T18:16:57,627 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1008): ClusterId : 15b47af1-03a5-4367-8a81-81a922f4f6da 2024-12-01T18:16:57,627 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:16:57,628 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:16:57,628 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:16:57,630 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:16:57,630 DEBUG [RS:0;b8365d49b74c:41713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@704d599f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:16:57,631 DEBUG [RS:0;b8365d49b74c:41713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7567e548, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:16:57,631 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-01T18:16:57,631 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-01T18:16:57,631 DEBUG [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-01T18:16:57,631 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(3073): reportForDuty to master=b8365d49b74c,35169,1733077017309 with isa=b8365d49b74c/172.17.0.2:41713, startcode=1733077017384 2024-12-01T18:16:57,632 DEBUG [RS:0;b8365d49b74c:41713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:16:57,634 INFO [RS-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39395, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:16:57,634 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b8365d49b74c,41713,1733077017384 2024-12-01T18:16:57,634 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169 {}] master.ServerManager(486): Registering regionserver=b8365d49b74c,41713,1733077017384 2024-12-01T18:16:57,636 DEBUG [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae 2024-12-01T18:16:57,636 DEBUG [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:44441 2024-12-01T18:16:57,636 DEBUG [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-01T18:16:57,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:16:57,637 DEBUG [RS:0;b8365d49b74c:41713 {}] zookeeper.ZKUtil(111): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b8365d49b74c,41713,1733077017384 2024-12-01T18:16:57,638 WARN [RS:0;b8365d49b74c:41713 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T18:16:57,638 INFO [RS:0;b8365d49b74c:41713 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:16:57,638 DEBUG [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384 2024-12-01T18:16:57,638 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b8365d49b74c,41713,1733077017384] 2024-12-01T18:16:57,641 DEBUG [RS:0;b8365d49b74c:41713 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-01T18:16:57,641 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:16:57,642 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:16:57,643 INFO [RS:0;b8365d49b74c:41713 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:16:57,643 INFO [RS:0;b8365d49b74c:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:57,643 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-01T18:16:57,644 INFO [RS:0;b8365d49b74c:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:16:57,644 DEBUG [RS:0;b8365d49b74c:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:16:57,648 INFO [RS:0;b8365d49b74c:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:57,648 INFO [RS:0;b8365d49b74c:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:57,648 INFO [RS:0;b8365d49b74c:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:57,648 INFO [RS:0;b8365d49b74c:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:57,648 INFO [RS:0;b8365d49b74c:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41713,1733077017384-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-01T18:16:57,663 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:16:57,663 INFO [RS:0;b8365d49b74c:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41713,1733077017384-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:57,677 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.Replication(204): b8365d49b74c,41713,1733077017384 started 2024-12-01T18:16:57,678 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1767): Serving as b8365d49b74c,41713,1733077017384, RpcServer on b8365d49b74c/172.17.0.2:41713, sessionid=0x1004ecc5ba50001 2024-12-01T18:16:57,678 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:16:57,678 DEBUG [RS:0;b8365d49b74c:41713 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b8365d49b74c,41713,1733077017384 2024-12-01T18:16:57,678 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,41713,1733077017384' 2024-12-01T18:16:57,678 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:16:57,678 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:16:57,679 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:16:57,679 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:16:57,679 DEBUG [RS:0;b8365d49b74c:41713 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b8365d49b74c,41713,1733077017384 2024-12-01T18:16:57,679 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,41713,1733077017384' 2024-12-01T18:16:57,679 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:16:57,679 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:16:57,679 DEBUG [RS:0;b8365d49b74c:41713 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:16:57,679 INFO [RS:0;b8365d49b74c:41713 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:16:57,679 INFO [RS:0;b8365d49b74c:41713 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T18:16:57,715 WARN [b8365d49b74c:35169 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-01T18:16:57,782 INFO [RS:0;b8365d49b74c:41713 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C41713%2C1733077017384, suffix=, logDir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384, archiveDir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/oldWALs, maxLogs=32 2024-12-01T18:16:57,783 INFO [RS:0;b8365d49b74c:41713 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41713%2C1733077017384.1733077017782 2024-12-01T18:16:57,793 INFO [RS:0;b8365d49b74c:41713 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077017782 2024-12-01T18:16:57,793 DEBUG [RS:0;b8365d49b74c:41713 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39339:39339),(127.0.0.1/127.0.0.1:42467:42467)] 2024-12-01T18:16:57,816 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T18:16:57,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:16:57,965 DEBUG [b8365d49b74c:35169 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-01T18:16:57,966 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b8365d49b74c,41713,1733077017384 2024-12-01T18:16:57,967 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,41713,1733077017384, state=OPENING 2024-12-01T18:16:57,968 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T18:16:57,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:57,970 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b8365d49b74c,41713,1733077017384}] 2024-12-01T18:16:57,970 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:16:57,970 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:16:58,123 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41713,1733077017384 2024-12-01T18:16:58,123 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T18:16:58,125 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51748, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T18:16:58,129 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-01T18:16:58,129 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:16:58,130 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C41713%2C1733077017384.meta, suffix=.meta, logDir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384, archiveDir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/oldWALs, maxLogs=32 2024-12-01T18:16:58,131 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 
b8365d49b74c%2C41713%2C1733077017384.meta.1733077018131.meta 2024-12-01T18:16:58,136 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.meta.1733077018131.meta 2024-12-01T18:16:58,137 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42467:42467),(127.0.0.1/127.0.0.1:39339:39339)] 2024-12-01T18:16:58,137 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:16:58,137 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T18:16:58,137 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T18:16:58,137 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-01T18:16:58,137 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T18:16:58,137 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:58,137 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-01T18:16:58,137 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-01T18:16:58,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:16:58,139 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 
2024-12-01T18:16:58,139 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:58,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:58,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:16:58,141 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:16:58,141 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:58,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:58,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:16:58,142 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:16:58,142 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:58,142 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:16:58,143 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740 2024-12-01T18:16:58,144 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740 2024-12-01T18:16:58,146 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-01T18:16:58,147 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:16:58,148 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772165, jitterRate=-0.018141567707061768}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:16:58,148 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:16:58,149 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733077018123 2024-12-01T18:16:58,151 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T18:16:58,151 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-01T18:16:58,152 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,41713,1733077017384 2024-12-01T18:16:58,153 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,41713,1733077017384, state=OPEN 2024-12-01T18:16:58,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:16:58,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:16:58,157 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:16:58,157 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: 
CHANGED 2024-12-01T18:16:58,159 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T18:16:58,159 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=b8365d49b74c,41713,1733077017384 in 187 msec 2024-12-01T18:16:58,161 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T18:16:58,161 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 596 msec 2024-12-01T18:16:58,163 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 635 msec 2024-12-01T18:16:58,163 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733077018163, completionTime=-1 2024-12-01T18:16:58,164 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-01T18:16:58,164 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-01T18:16:58,165 DEBUG [hconnection-0x5fd600cc-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:16:58,166 INFO [RS-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51758, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:16:58,167 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-01T18:16:58,167 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733077078167 2024-12-01T18:16:58,167 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733077138167 2024-12-01T18:16:58,167 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-01T18:16:58,172 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,35169,1733077017309-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:58,172 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,35169,1733077017309-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:58,172 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,35169,1733077017309-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:58,172 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b8365d49b74c:35169, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T18:16:58,172 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T18:16:58,172 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 2024-12-01T18:16:58,173 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:16:58,174 DEBUG [master/b8365d49b74c:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-01T18:16:58,174 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-01T18:16:58,175 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:16:58,175 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:58,176 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:16:58,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:16:58,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:16:58,185 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 15d5cc09b866ddd676c3ef9838a9f9be, NAME => 'hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae 2024-12-01T18:16:58,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:16:58,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:16:58,192 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 
2024-12-01T18:16:58,192 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 15d5cc09b866ddd676c3ef9838a9f9be, disabling compactions & flushes 2024-12-01T18:16:58,192 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 2024-12-01T18:16:58,192 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 2024-12-01T18:16:58,192 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. after waiting 0 ms 2024-12-01T18:16:58,192 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 2024-12-01T18:16:58,192 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 2024-12-01T18:16:58,192 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 15d5cc09b866ddd676c3ef9838a9f9be: 2024-12-01T18:16:58,193 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:16:58,194 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733077018193"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733077018193"}]},"ts":"1733077018193"} 2024-12-01T18:16:58,196 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-01T18:16:58,196 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:16:58,197 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733077018197"}]},"ts":"1733077018197"} 2024-12-01T18:16:58,198 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-01T18:16:58,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=15d5cc09b866ddd676c3ef9838a9f9be, ASSIGN}] 2024-12-01T18:16:58,203 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=15d5cc09b866ddd676c3ef9838a9f9be, ASSIGN 2024-12-01T18:16:58,203 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=15d5cc09b866ddd676c3ef9838a9f9be, ASSIGN; state=OFFLINE, location=b8365d49b74c,41713,1733077017384; forceNewPlan=false, retain=false 2024-12-01T18:16:58,354 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=15d5cc09b866ddd676c3ef9838a9f9be, regionState=OPENING, regionLocation=b8365d49b74c,41713,1733077017384 2024-12-01T18:16:58,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 15d5cc09b866ddd676c3ef9838a9f9be, server=b8365d49b74c,41713,1733077017384}] 2024-12-01T18:16:58,509 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41713,1733077017384 2024-12-01T18:16:58,513 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 
2024-12-01T18:16:58,513 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 15d5cc09b866ddd676c3ef9838a9f9be, NAME => 'hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:16:58,513 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 15d5cc09b866ddd676c3ef9838a9f9be 2024-12-01T18:16:58,513 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:58,513 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 15d5cc09b866ddd676c3ef9838a9f9be 2024-12-01T18:16:58,513 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 15d5cc09b866ddd676c3ef9838a9f9be 2024-12-01T18:16:58,515 INFO [StoreOpener-15d5cc09b866ddd676c3ef9838a9f9be-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 15d5cc09b866ddd676c3ef9838a9f9be 2024-12-01T18:16:58,516 INFO [StoreOpener-15d5cc09b866ddd676c3ef9838a9f9be-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 15d5cc09b866ddd676c3ef9838a9f9be columnFamilyName info 2024-12-01T18:16:58,516 DEBUG [StoreOpener-15d5cc09b866ddd676c3ef9838a9f9be-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:58,517 INFO [StoreOpener-15d5cc09b866ddd676c3ef9838a9f9be-1 {}] regionserver.HStore(327): Store=15d5cc09b866ddd676c3ef9838a9f9be/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:16:58,518 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/namespace/15d5cc09b866ddd676c3ef9838a9f9be 2024-12-01T18:16:58,518 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/namespace/15d5cc09b866ddd676c3ef9838a9f9be 2024-12-01T18:16:58,520 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 15d5cc09b866ddd676c3ef9838a9f9be 2024-12-01T18:16:58,522 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/namespace/15d5cc09b866ddd676c3ef9838a9f9be/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:16:58,523 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 15d5cc09b866ddd676c3ef9838a9f9be; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=784953, jitterRate=-0.0018814057111740112}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:16:58,523 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 15d5cc09b866ddd676c3ef9838a9f9be: 2024-12-01T18:16:58,524 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be., pid=6, masterSystemTime=1733077018509 2024-12-01T18:16:58,526 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 2024-12-01T18:16:58,526 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 
2024-12-01T18:16:58,527 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=15d5cc09b866ddd676c3ef9838a9f9be, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,41713,1733077017384 2024-12-01T18:16:58,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T18:16:58,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 15d5cc09b866ddd676c3ef9838a9f9be, server=b8365d49b74c,41713,1733077017384 in 173 msec 2024-12-01T18:16:58,532 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T18:16:58,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=15d5cc09b866ddd676c3ef9838a9f9be, ASSIGN in 329 msec 2024-12-01T18:16:58,533 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:16:58,534 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733077018533"}]},"ts":"1733077018533"} 2024-12-01T18:16:58,535 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-01T18:16:58,538 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:16:58,540 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 365 msec 2024-12-01T18:16:58,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:16:58,575 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-01T18:16:58,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:16:58,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:58,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:16:58,581 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-01T18:16:58,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:16:58,592 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 11 msec 2024-12-01T18:16:58,603 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-01T18:16:58,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:16:58,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 10 msec 2024-12-01T18:16:58,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-01T18:16:58,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-01T18:16:58,630 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.209sec 
2024-12-01T18:16:58,630 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T18:16:58,630 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T18:16:58,630 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T18:16:58,630 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-01T18:16:58,630 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T18:16:58,630 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,35169,1733077017309-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:16:58,630 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,35169,1733077017309-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T18:16:58,632 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-01T18:16:58,632 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T18:16:58,632 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,35169,1733077017309-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T18:16:58,716 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6c9995dc to 127.0.0.1:55440 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6b4591ca 2024-12-01T18:16:58,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e0e3135, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:16:58,721 DEBUG [hconnection-0x4c97e8ff-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:16:58,723 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51770, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:16:58,724 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b8365d49b74c,35169,1733077017309 2024-12-01T18:16:58,725 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:16:58,729 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-01T18:16:58,730 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T18:16:58,732 INFO [RS-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T18:16:58,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-01T18:16:58,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-01T18:16:58,733 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:16:58,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-01T18:16:58,735 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:16:58,735 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:58,736 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 9 2024-12-01T18:16:58,736 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:16:58,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-01T18:16:58,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741837_1013 (size=405) 2024-12-01T18:16:58,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741837_1013 (size=405) 2024-12-01T18:16:58,746 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 42ae786bffae56672d6b849461481de7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae 2024-12-01T18:16:58,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741838_1014 (size=88) 2024-12-01T18:16:58,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added 
to blk_1073741838_1014 (size=88) 2024-12-01T18:16:58,754 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:58,754 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1681): Closing 42ae786bffae56672d6b849461481de7, disabling compactions & flushes 2024-12-01T18:16:58,754 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 2024-12-01T18:16:58,754 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 2024-12-01T18:16:58,754 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. after waiting 0 ms 2024-12-01T18:16:58,754 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 2024-12-01T18:16:58,754 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 2024-12-01T18:16:58,754 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 42ae786bffae56672d6b849461481de7: 2024-12-01T18:16:58,756 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:16:58,756 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733077018756"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733077018756"}]},"ts":"1733077018756"} 2024-12-01T18:16:58,758 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-01T18:16:58,759 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:16:58,759 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733077018759"}]},"ts":"1733077018759"} 2024-12-01T18:16:58,760 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-01T18:16:58,765 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=42ae786bffae56672d6b849461481de7, ASSIGN}] 2024-12-01T18:16:58,766 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=42ae786bffae56672d6b849461481de7, ASSIGN 2024-12-01T18:16:58,766 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=42ae786bffae56672d6b849461481de7, ASSIGN; state=OFFLINE, location=b8365d49b74c,41713,1733077017384; forceNewPlan=false, retain=false 2024-12-01T18:16:58,917 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=42ae786bffae56672d6b849461481de7, regionState=OPENING, regionLocation=b8365d49b74c,41713,1733077017384 2024-12-01T18:16:58,919 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 42ae786bffae56672d6b849461481de7, server=b8365d49b74c,41713,1733077017384}] 2024-12-01T18:16:59,071 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41713,1733077017384 2024-12-01T18:16:59,075 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 
2024-12-01T18:16:59,076 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 42ae786bffae56672d6b849461481de7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:16:59,076 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 42ae786bffae56672d6b849461481de7 2024-12-01T18:16:59,076 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:16:59,076 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 42ae786bffae56672d6b849461481de7 2024-12-01T18:16:59,076 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 42ae786bffae56672d6b849461481de7 2024-12-01T18:16:59,077 INFO [StoreOpener-42ae786bffae56672d6b849461481de7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 42ae786bffae56672d6b849461481de7 2024-12-01T18:16:59,079 INFO [StoreOpener-42ae786bffae56672d6b849461481de7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 42ae786bffae56672d6b849461481de7 columnFamilyName info 2024-12-01T18:16:59,079 DEBUG [StoreOpener-42ae786bffae56672d6b849461481de7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:16:59,080 INFO [StoreOpener-42ae786bffae56672d6b849461481de7-1 {}] regionserver.HStore(327): Store=42ae786bffae56672d6b849461481de7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:16:59,081 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7 2024-12-01T18:16:59,081 DEBUG 
[RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7 2024-12-01T18:16:59,083 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 42ae786bffae56672d6b849461481de7 2024-12-01T18:16:59,086 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:16:59,086 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 42ae786bffae56672d6b849461481de7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767193, jitterRate=-0.02446487545967102}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:16:59,087 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 42ae786bffae56672d6b849461481de7: 2024-12-01T18:16:59,088 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7., pid=11, masterSystemTime=1733077019071 2024-12-01T18:16:59,090 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 2024-12-01T18:16:59,090 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 
2024-12-01T18:16:59,090 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=42ae786bffae56672d6b849461481de7, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,41713,1733077017384 2024-12-01T18:16:59,094 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-01T18:16:59,094 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 42ae786bffae56672d6b849461481de7, server=b8365d49b74c,41713,1733077017384 in 173 msec 2024-12-01T18:16:59,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-01T18:16:59,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=42ae786bffae56672d6b849461481de7, ASSIGN in 330 msec 2024-12-01T18:16:59,097 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:16:59,097 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733077019097"}]},"ts":"1733077019097"} 2024-12-01T18:16:59,099 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-01T18:16:59,101 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:16:59,103 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 368 msec 2024-12-01T18:16:59,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:00,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:01,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:02,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:03,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:03,639 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T18:17:03,640 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,666 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:17:03,677 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-01T18:17:03,678 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-01T18:17:04,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:05,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:17:06,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:07,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:07,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta after 68045ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor199.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:17:08,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:08,601 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-01T18:17:08,602 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-01T18:17:08,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-01T18:17:08,738 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 9 completed 2024-12-01T18:17:08,741 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-01T18:17:08,741 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 
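[Editor's note] The recurring "RecoverLeaseFSUtils(258): Failed invocation ... Caused by: java.io.IOException: Filesystem closed" WARN entries in this stretch of the log come from the WAL close path retrying lease recovery roughly once per second against a DFS client that has already been shut down, so every attempt fails the same way. The sketch below is illustrative only (it is not the HBase RecoverLeaseFSUtils implementation); it uses the public DistributedFileSystem.recoverLease/isFileClosed calls that the log's reflective invocations ultimately reach, and the timeout and 1-second sleep are assumptions chosen to mirror the spacing of the WARN lines.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative sketch of a bounded, once-per-second lease-recovery retry loop.
public final class LeaseRecoverySketch {
  static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        // recoverLease() returns true once the file is closed and its lease released;
        // isFileClosed() covers the case where recovery already completed earlier.
        if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
          return true;
        }
      } catch (IOException e) {
        // The "java.io.IOException: Filesystem closed" seen in the log lands here:
        // the underlying DFSClient has been shut down, so each attempt fails the
        // same way and the loop just sleeps and tries again, which is why the
        // WARN line repeats roughly once per second until the timeout expires.
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}
```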
2024-12-01T18:17:08,748 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush hbase:namespace 2024-12-01T18:17:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace 2024-12-01T18:17:08,754 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_PREPARE 2024-12-01T18:17:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-01T18:17:08,755 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-01T18:17:08,757 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-01T18:17:08,917 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41713,1733077017384 2024-12-01T18:17:08,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-01T18:17:08,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 
2024-12-01T18:17:08,919 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 15d5cc09b866ddd676c3ef9838a9f9be 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-01T18:17:08,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/namespace/15d5cc09b866ddd676c3ef9838a9f9be/.tmp/info/af18be010acd49558a0b51fe9c92b30d is 45, key is default/info:d/1733077018585/Put/seqid=0 2024-12-01T18:17:08,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741839_1015 (size=5037) 2024-12-01T18:17:08,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741839_1015 (size=5037) 2024-12-01T18:17:08,941 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/namespace/15d5cc09b866ddd676c3ef9838a9f9be/.tmp/info/af18be010acd49558a0b51fe9c92b30d 2024-12-01T18:17:08,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/namespace/15d5cc09b866ddd676c3ef9838a9f9be/.tmp/info/af18be010acd49558a0b51fe9c92b30d as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/namespace/15d5cc09b866ddd676c3ef9838a9f9be/info/af18be010acd49558a0b51fe9c92b30d 2024-12-01T18:17:08,954 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/namespace/15d5cc09b866ddd676c3ef9838a9f9be/info/af18be010acd49558a0b51fe9c92b30d, entries=2, sequenceid=6, filesize=4.9 K 2024-12-01T18:17:08,955 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 15d5cc09b866ddd676c3ef9838a9f9be in 36ms, sequenceid=6, compaction requested=false 2024-12-01T18:17:08,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 15d5cc09b866ddd676c3ef9838a9f9be: 2024-12-01T18:17:08,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 
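[Editor's note] The entries above record the server side of a table flush: the master stores a FlushTableProcedure (pid=12) for hbase:namespace, dispatches a FlushRegionProcedure (pid=13) to the region server, and the region's memstore is written to a temporary HFile and then committed into the info/ family directory. For context, a minimal sketch of the client-side call that triggers this sequence is shown below; it is not the test's own code, and the configuration is assumed to come from the test cluster's hbase-site.xml.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: ask the master to flush a table via the public Admin API.
public final class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master turns this request into a FlushTableProcedure with a
      // FlushRegionProcedure subtask per region, as seen in the log; the client
      // waits until the procedure reports completion ("Operation: FLUSH ... completed").
      admin.flush(TableName.valueOf("hbase:namespace"));
    }
  }
}
```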
2024-12-01T18:17:08,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-01T18:17:08,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-01T18:17:08,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-01T18:17:08,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 203 msec 2024-12-01T18:17:08,964 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace in 212 msec 2024-12-01T18:17:09,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:10,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:11,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:12,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:13,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:14,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:17:15,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:16,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:17,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:18,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:18,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-01T18:17:18,756 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: hbase:namespace, procId: 12 completed 2024-12-01T18:17:18,764 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-01T18:17:18,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-01T18:17:18,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-01T18:17:18,766 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-01T18:17:18,767 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-01T18:17:18,767 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-01T18:17:18,919 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41713,1733077017384 2024-12-01T18:17:18,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-01T18:17:18,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 2024-12-01T18:17:18,920 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 42ae786bffae56672d6b849461481de7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-01T18:17:18,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/fd60e4df40d64d1f96e571e8d67c8bab is 1080, key is row0001/info:/1733077038760/Put/seqid=0 2024-12-01T18:17:18,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741840_1016 (size=6033) 2024-12-01T18:17:18,942 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/fd60e4df40d64d1f96e571e8d67c8bab 2024-12-01T18:17:18,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741840_1016 (size=6033) 2024-12-01T18:17:18,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/fd60e4df40d64d1f96e571e8d67c8bab as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/fd60e4df40d64d1f96e571e8d67c8bab 2024-12-01T18:17:18,954 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/fd60e4df40d64d1f96e571e8d67c8bab, entries=1, sequenceid=5, filesize=5.9 K 2024-12-01T18:17:18,955 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
42ae786bffae56672d6b849461481de7 in 35ms, sequenceid=5, compaction requested=false 2024-12-01T18:17:18,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 42ae786bffae56672d6b849461481de7: 2024-12-01T18:17:18,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 2024-12-01T18:17:18,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-01T18:17:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-01T18:17:18,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-01T18:17:18,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec 2024-12-01T18:17:18,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec 2024-12-01T18:17:19,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:20,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:21,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:22,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-01T18:17:23,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-01T18:17:24,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:25,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:26,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:27,275 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-01T18:17:27,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:28,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
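The recurring Close-WAL-Writer-0 warnings above are RecoverLeaseFSUtils repeatedly probing, roughly once per second, whether the old meta WAL file is already closed after the underlying DFS client has been closed (the "Filesystem closed" cause), so every attempt fails the same way. The probe is made through reflection (the Method.invoke frames in the trace), which is why the real IOException surfaces wrapped in an InvocationTargetException. The sketch below only illustrates that pattern; the class and method names here are assumptions for illustration, not the actual HBase implementation.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  // Hypothetical helper: ask the filesystem whether a file is closed, via reflection,
  // so the caller still loads against FileSystem implementations without isFileClosed().
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      // DistributedFileSystem#isFileClosed(Path) is the HDFS method seen in the trace.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // method unavailable or inaccessible on this FileSystem implementation
    } catch (InvocationTargetException e) {
      // The real failure (here: java.io.IOException: Filesystem closed) is the cause;
      // Method.invoke always wraps it, which is what the WARN lines above show.
      return false;
    }
  }
}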
2024-12-01T18:17:28,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-12-01T18:17:28,767 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 14 completed
2024-12-01T18:17:28,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-01T18:17:28,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-01T18:17:28,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16
2024-12-01T18:17:28,774 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-01T18:17:28,774 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-01T18:17:28,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-01T18:17:28,926 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41713,1733077017384
2024-12-01T18:17:28,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17
2024-12-01T18:17:28,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.
2024-12-01T18:17:28,928 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 42ae786bffae56672d6b849461481de7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-01T18:17:28,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/60b29e8976a84ca793f9cdcb4782c8a9 is 1080, key is row0002/info:/1733077048768/Put/seqid=0
2024-12-01T18:17:28,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741841_1017 (size=6033)
2024-12-01T18:17:28,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741841_1017 (size=6033)
2024-12-01T18:17:28,941 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/60b29e8976a84ca793f9cdcb4782c8a9
2024-12-01T18:17:28,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/60b29e8976a84ca793f9cdcb4782c8a9 as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/60b29e8976a84ca793f9cdcb4782c8a9
2024-12-01T18:17:28,952 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/60b29e8976a84ca793f9cdcb4782c8a9, entries=1, sequenceid=9, filesize=5.9 K
2024-12-01T18:17:28,953 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 42ae786bffae56672d6b849461481de7 in 26ms, sequenceid=9, compaction requested=false
2024-12-01T18:17:28,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 42ae786bffae56672d6b849461481de7:
2024-12-01T18:17:28,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.
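The flush above (FlushTableProcedure pid=16 with subprocedure pid=17) was requested by the test client and then executed on the region server. From client code, a table flush of this kind goes through the HBase Admin API; a minimal sketch, with the table name taken from the log and the connection setup assumed rather than taken from the test itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush the table and waits for the procedure to finish,
      // which is what the "Operation: FLUSH ... completed" client line reflects.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}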
2024-12-01T18:17:28,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17
2024-12-01T18:17:28,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4106): Remote procedure done, pid=17
2024-12-01T18:17:28,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16
2024-12-01T18:17:28,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec
2024-12-01T18:17:28,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-12-01T18:17:29,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:29,631 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-01T18:17:29,633 INFO [RS-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48816, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
2024-12-01T18:17:30,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:31,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:32,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:33,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:34,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:35,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:36,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:37,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:38,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:38,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16
2024-12-01T18:17:38,775 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 16 completed
2024-12-01T18:17:38,777 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41713%2C1733077017384.1733077058777
2024-12-01T18:17:38,785 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077017782 with entries=13, filesize=6.41 KB; new WAL /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077058777
2024-12-01T18:17:38,785 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39339:39339),(127.0.0.1/127.0.0.1:42467:42467)]
2024-12-01T18:17:38,785 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077017782 is not closed yet, will try archiving it next time
2024-12-01T18:17:38,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741833_1009 (size=6574)
2024-12-01T18:17:38,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741833_1009 (size=6574)
2024-12-01T18:17:38,789 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-01T18:17:38,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-01T18:17:38,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-12-01T18:17:38,791 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-01T18:17:38,791 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-01T18:17:38,791 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-01T18:17:38,943 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41713,1733077017384
2024-12-01T18:17:38,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19
2024-12-01T18:17:38,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.
2024-12-01T18:17:38,944 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 42ae786bffae56672d6b849461481de7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-01T18:17:38,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/2c60e64e1067446a94cbc02ad42d1e20 is 1080, key is row0003/info:/1733077058776/Put/seqid=0
2024-12-01T18:17:38,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741843_1019 (size=6033)
2024-12-01T18:17:38,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741843_1019 (size=6033)
2024-12-01T18:17:38,987 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/2c60e64e1067446a94cbc02ad42d1e20
2024-12-01T18:17:38,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/2c60e64e1067446a94cbc02ad42d1e20 as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/2c60e64e1067446a94cbc02ad42d1e20
2024-12-01T18:17:39,000 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/2c60e64e1067446a94cbc02ad42d1e20, entries=1, sequenceid=13, filesize=5.9 K
2024-12-01T18:17:39,002 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 42ae786bffae56672d6b849461481de7 in 58ms, sequenceid=13, compaction requested=true
2024-12-01T18:17:39,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 42ae786bffae56672d6b849461481de7:
2024-12-01T18:17:39,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.
2024-12-01T18:17:39,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19
2024-12-01T18:17:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4106): Remote procedure done, pid=19
2024-12-01T18:17:39,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18
2024-12-01T18:17:39,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 212 msec
2024-12-01T18:17:39,007 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 217 msec
2024-12-01T18:17:39,226 INFO [master/b8365d49b74c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-01T18:17:39,226 INFO [master/b8365d49b74c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-01T18:17:39,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
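The WAL roll recorded above at 18:17:38,785 (old writer closed after 13 entries, new writer created on a fresh datanode pipeline) appears to be triggered in-process, since it is logged on the Time-limited test thread. The same roll can also be requested remotely through the Admin API; the sketch below shows that route only, with the server name taken from the log and everything else assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RollWalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the region server to close its current WAL writer and open a new one;
      // the server side then logs "Rolled WAL ... with entries=N ...; new WAL ...".
      admin.rollWALWriter(ServerName.valueOf("b8365d49b74c,41713,1733077017384"));
    }
  }
}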
2024-12-01T18:17:40,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:41,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:42,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:43,513 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 15d5cc09b866ddd676c3ef9838a9f9be, had cached 0 bytes from a total of 5037
2024-12-01T18:17:43,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:44,076 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 42ae786bffae56672d6b849461481de7, had cached 0 bytes from a total of 18099
2024-12-01T18:17:44,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:45,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:46,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:47,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
2024-12-01T18:17:48,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-01T18:17:48,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-01T18:17:48,792 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 18 completed 2024-12-01T18:17:48,792 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-01T18:17:48,793 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-01T18:17:48,793 DEBUG [Time-limited test {}] regionserver.HStore(1540): 42ae786bffae56672d6b849461481de7/info is initiating minor compaction (all files) 2024-12-01T18:17:48,793 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:17:48,793 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:17:48,794 INFO [Time-limited test {}] regionserver.HRegion(2351): Starting compaction of 42ae786bffae56672d6b849461481de7/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 2024-12-01T18:17:48,794 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/fd60e4df40d64d1f96e571e8d67c8bab, hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/60b29e8976a84ca793f9cdcb4782c8a9, hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/2c60e64e1067446a94cbc02ad42d1e20] into tmpdir=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp, totalSize=17.7 K 2024-12-01T18:17:48,794 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting fd60e4df40d64d1f96e571e8d67c8bab, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733077038760 2024-12-01T18:17:48,795 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 60b29e8976a84ca793f9cdcb4782c8a9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733077048768 2024-12-01T18:17:48,795 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 2c60e64e1067446a94cbc02ad42d1e20, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733077058776 2024-12-01T18:17:48,806 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 42ae786bffae56672d6b849461481de7#info#compaction#29 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 
ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-01T18:17:48,806 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/28efe3b63c354123a83e268cf6bf03a6 is 1080, key is row0001/info:/1733077038760/Put/seqid=0 2024-12-01T18:17:48,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741844_1020 (size=8296) 2024-12-01T18:17:48,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741844_1020 (size=8296) 2024-12-01T18:17:48,816 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/28efe3b63c354123a83e268cf6bf03a6 as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/28efe3b63c354123a83e268cf6bf03a6 2024-12-01T18:17:48,822 INFO [Time-limited test {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 42ae786bffae56672d6b849461481de7/info of 42ae786bffae56672d6b849461481de7 into 28efe3b63c354123a83e268cf6bf03a6(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-01T18:17:48,822 DEBUG [Time-limited test {}] regionserver.HRegion(2381): Compaction status journal for 42ae786bffae56672d6b849461481de7: 2024-12-01T18:17:48,824 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41713%2C1733077017384.1733077068824 2024-12-01T18:17:48,830 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077058777 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077068824 2024-12-01T18:17:48,830 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39339:39339),(127.0.0.1/127.0.0.1:42467:42467)] 2024-12-01T18:17:48,830 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077058777 is not closed yet, will try archiving it next time 2024-12-01T18:17:48,830 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077017782 to hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/oldWALs/b8365d49b74c%2C41713%2C1733077017384.1733077017782 2024-12-01T18:17:48,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741842_1018 (size=2520) 2024-12-01T18:17:48,832 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741842_1018 (size=2520) 2024-12-01T18:17:48,834 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-01T18:17:48,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-01T18:17:48,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-01T18:17:48,836 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-01T18:17:48,837 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-01T18:17:48,837 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-01T18:17:48,989 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41713,1733077017384 2024-12-01T18:17:48,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-01T18:17:48,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 
2024-12-01T18:17:48,990 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 42ae786bffae56672d6b849461481de7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-01T18:17:48,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/e1b4ae0af7e24daab4cfd0a8230d8bd1 is 1080, key is row0000/info:/1733077068823/Put/seqid=0 2024-12-01T18:17:48,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741846_1022 (size=6033) 2024-12-01T18:17:48,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741846_1022 (size=6033) 2024-12-01T18:17:48,999 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/e1b4ae0af7e24daab4cfd0a8230d8bd1 2024-12-01T18:17:49,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/e1b4ae0af7e24daab4cfd0a8230d8bd1 as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/e1b4ae0af7e24daab4cfd0a8230d8bd1 2024-12-01T18:17:49,011 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/e1b4ae0af7e24daab4cfd0a8230d8bd1, entries=1, sequenceid=18, filesize=5.9 K 2024-12-01T18:17:49,012 INFO [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 42ae786bffae56672d6b849461481de7 in 23ms, sequenceid=18, compaction requested=false 2024-12-01T18:17:49,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 42ae786bffae56672d6b849461481de7: 2024-12-01T18:17:49,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 
2024-12-01T18:17:49,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-01T18:17:49,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-01T18:17:49,016 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-01T18:17:49,016 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-12-01T18:17:49,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-12-01T18:17:49,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:50,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:51,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:52,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:53,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:54,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:17:55,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:56,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:17:57,276 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T18:17:57,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:17:58,182 DEBUG [master/b8365d49b74c:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-01T18:17:58,182 DEBUG [master/b8365d49b74c:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 15d5cc09b866ddd676c3ef9838a9f9be changed from -1.0 to 0.0, refreshing cache 2024-12-01T18:17:58,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:17:58,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-01T18:17:58,837 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 20 completed 2024-12-01T18:17:58,839 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41713%2C1733077017384.1733077078839 2024-12-01T18:17:58,846 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077068824 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077078839 2024-12-01T18:17:58,846 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39339:39339),(127.0.0.1/127.0.0.1:42467:42467)] 2024-12-01T18:17:58,846 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077068824 is not closed yet, will try archiving it next time 2024-12-01T18:17:58,846 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077058777 to hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/oldWALs/b8365d49b74c%2C41713%2C1733077017384.1733077058777 2024-12-01T18:17:58,846 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-01T18:17:58,846 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-01T18:17:58,846 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6c9995dc to 127.0.0.1:55440 2024-12-01T18:17:58,846 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:17:58,846 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T18:17:58,846 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1481303323, stopped=false 2024-12-01T18:17:58,846 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b8365d49b74c,35169,1733077017309 2024-12-01T18:17:58,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741845_1021 (size=2026) 2024-12-01T18:17:58,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:17:58,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:17:58,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, 
quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:17:58,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:17:58,849 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-01T18:17:58,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741845_1021 (size=2026) 2024-12-01T18:17:58,849 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:17:58,849 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,41713,1733077017384' ***** 2024-12-01T18:17:58,849 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-01T18:17:58,849 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:17:58,849 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-01T18:17:58,850 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:17:58,850 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:17:58,850 INFO [RS:0;b8365d49b74c:41713 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T18:17:58,850 INFO [RS:0;b8365d49b74c:41713 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T18:17:58,850 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(3579): Received CLOSE for 42ae786bffae56672d6b849461481de7 2024-12-01T18:17:58,850 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(3579): Received CLOSE for 15d5cc09b866ddd676c3ef9838a9f9be 2024-12-01T18:17:58,850 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,41713,1733077017384 2024-12-01T18:17:58,850 DEBUG [RS:0;b8365d49b74c:41713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:17:58,850 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 42ae786bffae56672d6b849461481de7, disabling compactions & flushes 2024-12-01T18:17:58,850 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 2024-12-01T18:17:58,850 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:17:58,850 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 
2024-12-01T18:17:58,850 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:17:58,850 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. after waiting 0 ms 2024-12-01T18:17:58,851 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T18:17:58,851 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 2024-12-01T18:17:58,851 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-01T18:17:58,851 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 42ae786bffae56672d6b849461481de7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-01T18:17:58,851 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-01T18:17:58,851 DEBUG [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1603): Online Regions={42ae786bffae56672d6b849461481de7=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7., 1588230740=hbase:meta,,1.1588230740, 15d5cc09b866ddd676c3ef9838a9f9be=hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be.} 2024-12-01T18:17:58,851 DEBUG [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 15d5cc09b866ddd676c3ef9838a9f9be, 42ae786bffae56672d6b849461481de7 2024-12-01T18:17:58,851 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:17:58,851 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:17:58,851 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:17:58,851 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:17:58,851 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:17:58,851 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=3.05 KB heapSize=5.55 KB 2024-12-01T18:17:58,857 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/191dff4b13b74e93909d6a82579e2d88 is 1080, key is row0001/info:/1733077078838/Put/seqid=0 2024-12-01T18:17:58,861 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741848_1024 (size=6033) 2024-12-01T18:17:58,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741848_1024 (size=6033) 2024-12-01T18:17:58,862 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/191dff4b13b74e93909d6a82579e2d88 2024-12-01T18:17:58,868 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/.tmp/info/191dff4b13b74e93909d6a82579e2d88 as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/191dff4b13b74e93909d6a82579e2d88 2024-12-01T18:17:58,874 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/191dff4b13b74e93909d6a82579e2d88, entries=1, sequenceid=22, filesize=5.9 K 2024-12-01T18:17:58,875 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 42ae786bffae56672d6b849461481de7 in 23ms, sequenceid=22, compaction requested=true 2024-12-01T18:17:58,875 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/fd60e4df40d64d1f96e571e8d67c8bab, hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/60b29e8976a84ca793f9cdcb4782c8a9, hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/2c60e64e1067446a94cbc02ad42d1e20] to archive 2024-12-01T18:17:58,876 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-01T18:17:58,878 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/fd60e4df40d64d1f96e571e8d67c8bab to hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/fd60e4df40d64d1f96e571e8d67c8bab 2024-12-01T18:17:58,878 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/.tmp/info/c067229ce5834bfe9384f4f9186c01df is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7./info:regioninfo/1733077019090/Put/seqid=0 2024-12-01T18:17:58,879 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/60b29e8976a84ca793f9cdcb4782c8a9 to hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/60b29e8976a84ca793f9cdcb4782c8a9 2024-12-01T18:17:58,880 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/2c60e64e1067446a94cbc02ad42d1e20 to hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/info/2c60e64e1067446a94cbc02ad42d1e20 2024-12-01T18:17:58,888 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/42ae786bffae56672d6b849461481de7/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-01T18:17:58,889 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 
2024-12-01T18:17:58,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741849_1025 (size=8430) 2024-12-01T18:17:58,889 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 42ae786bffae56672d6b849461481de7: 2024-12-01T18:17:58,889 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733077018733.42ae786bffae56672d6b849461481de7. 2024-12-01T18:17:58,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741849_1025 (size=8430) 2024-12-01T18:17:58,890 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 15d5cc09b866ddd676c3ef9838a9f9be, disabling compactions & flushes 2024-12-01T18:17:58,890 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 2024-12-01T18:17:58,890 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 2024-12-01T18:17:58,890 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. after waiting 0 ms 2024-12-01T18:17:58,890 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 2024-12-01T18:17:58,890 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/.tmp/info/c067229ce5834bfe9384f4f9186c01df 2024-12-01T18:17:58,893 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/namespace/15d5cc09b866ddd676c3ef9838a9f9be/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T18:17:58,894 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 2024-12-01T18:17:58,894 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 15d5cc09b866ddd676c3ef9838a9f9be: 2024-12-01T18:17:58,894 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733077018172.15d5cc09b866ddd676c3ef9838a9f9be. 
2024-12-01T18:17:58,918 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/.tmp/table/ec16683de59c450294bcd0fd42ca5717 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733077019097/Put/seqid=0 2024-12-01T18:17:58,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741850_1026 (size=5532) 2024-12-01T18:17:58,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741850_1026 (size=5532) 2024-12-01T18:17:58,924 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=264 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/.tmp/table/ec16683de59c450294bcd0fd42ca5717 2024-12-01T18:17:58,930 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/.tmp/info/c067229ce5834bfe9384f4f9186c01df as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/info/c067229ce5834bfe9384f4f9186c01df 2024-12-01T18:17:58,934 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/info/c067229ce5834bfe9384f4f9186c01df, entries=20, sequenceid=14, filesize=8.2 K 2024-12-01T18:17:58,935 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/.tmp/table/ec16683de59c450294bcd0fd42ca5717 as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/table/ec16683de59c450294bcd0fd42ca5717 2024-12-01T18:17:58,940 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/table/ec16683de59c450294bcd0fd42ca5717, entries=4, sequenceid=14, filesize=5.4 K 2024-12-01T18:17:58,941 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~3.05 KB/3122, heapSize ~5.27 KB/5400, currentSize=0 B/0 for 1588230740 in 90ms, sequenceid=14, compaction requested=false 2024-12-01T18:17:58,945 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 2024-12-01T18:17:58,945 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 
2024-12-01T18:17:58,945 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-01T18:17:58,945 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:17:58,945 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T18:17:59,051 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,41713,1733077017384; all regions closed. 2024-12-01T18:17:59,052 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384 2024-12-01T18:17:59,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741834_1010 (size=4570) 2024-12-01T18:17:59,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741834_1010 (size=4570) 2024-12-01T18:17:59,056 DEBUG [RS:0;b8365d49b74c:41713 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/oldWALs 2024-12-01T18:17:59,056 INFO [RS:0;b8365d49b74c:41713 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b8365d49b74c%2C41713%2C1733077017384.meta:.meta(num 1733077018131) 2024-12-01T18:17:59,057 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384 2024-12-01T18:17:59,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741847_1023 (size=1545) 2024-12-01T18:17:59,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741847_1023 (size=1545) 2024-12-01T18:17:59,249 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/WALs/b8365d49b74c,41713,1733077017384/b8365d49b74c%2C41713%2C1733077017384.1733077068824 to hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/oldWALs/b8365d49b74c%2C41713%2C1733077017384.1733077068824 2024-12-01T18:17:59,251 DEBUG [RS:0;b8365d49b74c:41713 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/oldWALs 2024-12-01T18:17:59,251 INFO [RS:0;b8365d49b74c:41713 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b8365d49b74c%2C41713%2C1733077017384:(num 1733077078839) 2024-12-01T18:17:59,251 DEBUG [RS:0;b8365d49b74c:41713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:17:59,251 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:17:59,252 INFO [RS:0;b8365d49b74c:41713 {}] hbase.ChoreService(370): Chore service for: regionserver/b8365d49b74c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-01T18:17:59,252 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
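The WAL handling above (writer closed, file moved to oldWALs) happens automatically during shutdown, but a roll can also be requested explicitly from a client. A minimal sketch, assuming a running cluster and using only the public Admin API (not taken from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask every live region server to roll its WAL; the previous WAL file then
          // becomes eligible for archiving to the oldWALs directory.
          for (ServerName sn : admin.getRegionServers()) {
            admin.rollWALWriter(sn);
          }
        }
      }
    }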
2024-12-01T18:17:59,252 INFO [RS:0;b8365d49b74c:41713 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41713 2024-12-01T18:17:59,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b8365d49b74c,41713,1733077017384 2024-12-01T18:17:59,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:17:59,256 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b8365d49b74c,41713,1733077017384] 2024-12-01T18:17:59,256 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b8365d49b74c,41713,1733077017384; numProcessing=1 2024-12-01T18:17:59,257 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b8365d49b74c,41713,1733077017384 already deleted, retry=false 2024-12-01T18:17:59,257 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b8365d49b74c,41713,1733077017384 expired; onlineServers=0 2024-12-01T18:17:59,257 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,35169,1733077017309' ***** 2024-12-01T18:17:59,257 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T18:17:59,258 DEBUG [M:0;b8365d49b74c:35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@105ca249, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:17:59,258 INFO [M:0;b8365d49b74c:35169 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,35169,1733077017309 2024-12-01T18:17:59,258 INFO [M:0;b8365d49b74c:35169 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,35169,1733077017309; all regions closed. 2024-12-01T18:17:59,258 DEBUG [M:0;b8365d49b74c:35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:17:59,258 DEBUG [M:0;b8365d49b74c:35169 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T18:17:59,258 DEBUG [M:0;b8365d49b74c:35169 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T18:17:59,258 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-01T18:17:59,258 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733077017531 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733077017531,5,FailOnTimeoutGroup] 2024-12-01T18:17:59,258 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733077017531 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733077017531,5,FailOnTimeoutGroup] 2024-12-01T18:17:59,258 INFO [M:0;b8365d49b74c:35169 {}] hbase.ChoreService(370): Chore service for: master/b8365d49b74c:0 had [] on shutdown 2024-12-01T18:17:59,258 DEBUG [M:0;b8365d49b74c:35169 {}] master.HMaster(1733): Stopping service threads 2024-12-01T18:17:59,258 INFO [M:0;b8365d49b74c:35169 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T18:17:59,259 INFO [M:0;b8365d49b74c:35169 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T18:17:59,259 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-01T18:17:59,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T18:17:59,259 DEBUG [M:0;b8365d49b74c:35169 {}] zookeeper.ZKUtil(347): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T18:17:59,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:17:59,259 WARN [M:0;b8365d49b74c:35169 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T18:17:59,259 INFO [M:0;b8365d49b74c:35169 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-01T18:17:59,259 INFO [M:0;b8365d49b74c:35169 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T18:17:59,260 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:17:59,260 DEBUG [M:0;b8365d49b74c:35169 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:17:59,260 INFO [M:0;b8365d49b74c:35169 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:17:59,260 DEBUG [M:0;b8365d49b74c:35169 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:17:59,260 DEBUG [M:0;b8365d49b74c:35169 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:17:59,260 DEBUG [M:0;b8365d49b74c:35169 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T18:17:59,260 INFO [M:0;b8365d49b74c:35169 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=65.06 KB heapSize=81.67 KB 2024-12-01T18:17:59,276 DEBUG [M:0;b8365d49b74c:35169 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/00907e3203f24dbf890bb08f55e0b0e9 is 82, key is hbase:meta,,1/info:regioninfo/1733077018152/Put/seqid=0 2024-12-01T18:17:59,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741851_1027 (size=5672) 2024-12-01T18:17:59,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741851_1027 (size=5672) 2024-12-01T18:17:59,282 INFO [M:0;b8365d49b74c:35169 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/00907e3203f24dbf890bb08f55e0b0e9 2024-12-01T18:17:59,301 DEBUG [M:0;b8365d49b74c:35169 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4c86e88c16a246e1a3cfdad046d9bf41 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733077019102/Put/seqid=0 2024-12-01T18:17:59,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741852_1028 (size=8354) 2024-12-01T18:17:59,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741852_1028 (size=8354) 2024-12-01T18:17:59,306 INFO [M:0;b8365d49b74c:35169 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.46 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4c86e88c16a246e1a3cfdad046d9bf41 2024-12-01T18:17:59,311 INFO [M:0;b8365d49b74c:35169 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4c86e88c16a246e1a3cfdad046d9bf41 2024-12-01T18:17:59,325 DEBUG [M:0;b8365d49b74c:35169 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/028c523877cb4ce78b3dbe20f56e9932 is 69, key is b8365d49b74c,41713,1733077017384/rs:state/1733077017634/Put/seqid=0 2024-12-01T18:17:59,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741853_1029 (size=5156) 2024-12-01T18:17:59,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741853_1029 (size=5156) 2024-12-01T18:17:59,330 INFO [M:0;b8365d49b74c:35169 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=184 (bloomFilter=true), 
to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/028c523877cb4ce78b3dbe20f56e9932 2024-12-01T18:17:59,349 DEBUG [M:0;b8365d49b74c:35169 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/461bf3dad9124591b52739007f1a1545 is 52, key is load_balancer_on/state:d/1733077018726/Put/seqid=0 2024-12-01T18:17:59,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741854_1030 (size=5056) 2024-12-01T18:17:59,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741854_1030 (size=5056) 2024-12-01T18:17:59,354 INFO [M:0;b8365d49b74c:35169 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/461bf3dad9124591b52739007f1a1545 2024-12-01T18:17:59,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:17:59,356 INFO [RS:0;b8365d49b74c:41713 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,41713,1733077017384; zookeeper connection closed. 2024-12-01T18:17:59,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1004ecc5ba50001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:17:59,356 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1b552ea {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1b552ea 2024-12-01T18:17:59,356 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-01T18:17:59,360 DEBUG [M:0;b8365d49b74c:35169 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/00907e3203f24dbf890bb08f55e0b0e9 as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/00907e3203f24dbf890bb08f55e0b0e9 2024-12-01T18:17:59,365 INFO [M:0;b8365d49b74c:35169 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/00907e3203f24dbf890bb08f55e0b0e9, entries=8, sequenceid=184, filesize=5.5 K 2024-12-01T18:17:59,366 DEBUG [M:0;b8365d49b74c:35169 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4c86e88c16a246e1a3cfdad046d9bf41 as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4c86e88c16a246e1a3cfdad046d9bf41 
2024-12-01T18:17:59,371 INFO [M:0;b8365d49b74c:35169 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4c86e88c16a246e1a3cfdad046d9bf41 2024-12-01T18:17:59,371 INFO [M:0;b8365d49b74c:35169 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4c86e88c16a246e1a3cfdad046d9bf41, entries=21, sequenceid=184, filesize=8.2 K 2024-12-01T18:17:59,372 DEBUG [M:0;b8365d49b74c:35169 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/028c523877cb4ce78b3dbe20f56e9932 as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/028c523877cb4ce78b3dbe20f56e9932 2024-12-01T18:17:59,376 INFO [M:0;b8365d49b74c:35169 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/028c523877cb4ce78b3dbe20f56e9932, entries=1, sequenceid=184, filesize=5.0 K 2024-12-01T18:17:59,377 DEBUG [M:0;b8365d49b74c:35169 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/461bf3dad9124591b52739007f1a1545 as hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/461bf3dad9124591b52739007f1a1545 2024-12-01T18:17:59,381 INFO [M:0;b8365d49b74c:35169 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44441/user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/461bf3dad9124591b52739007f1a1545, entries=1, sequenceid=184, filesize=4.9 K 2024-12-01T18:17:59,382 INFO [M:0;b8365d49b74c:35169 {}] regionserver.HRegion(3040): Finished flush of dataSize ~65.06 KB/66619, heapSize ~81.61 KB/83568, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=184, compaction requested=false 2024-12-01T18:17:59,383 INFO [M:0;b8365d49b74c:35169 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:17:59,383 DEBUG [M:0;b8365d49b74c:35169 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:17:59,384 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/b8f534d5-1d74-a188-1b6a-58a56f2bdbae/MasterData/WALs/b8365d49b74c,35169,1733077017309 2024-12-01T18:17:59,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741830_1006 (size=79140) 2024-12-01T18:17:59,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37113 is added to blk_1073741830_1006 (size=79140) 2024-12-01T18:17:59,386 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-01T18:17:59,386 INFO [M:0;b8365d49b74c:35169 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-12-01T18:17:59,386 INFO [M:0;b8365d49b74c:35169 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35169 2024-12-01T18:17:59,388 DEBUG [M:0;b8365d49b74c:35169 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b8365d49b74c,35169,1733077017309 already deleted, retry=false 2024-12-01T18:17:59,490 INFO [M:0;b8365d49b74c:35169 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,35169,1733077017309; zookeeper connection closed. 2024-12-01T18:17:59,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:17:59,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1004ecc5ba50000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:17:59,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@34b4a513{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:17:59,493 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@706d0530{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:17:59,493 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:17:59,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55215408{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:17:59,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@152be98f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/hadoop.log.dir/,STOPPED} 2024-12-01T18:17:59,495 WARN [BP-569714203-172.17.0.2-1733077016602 heartbeating to localhost/127.0.0.1:44441 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:17:59,495 WARN [BP-569714203-172.17.0.2-1733077016602 heartbeating to localhost/127.0.0.1:44441 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-569714203-172.17.0.2-1733077016602 (Datanode Uuid 24164c7c-2d49-4ce9-8b1a-ce5b6e8c32ea) service to localhost/127.0.0.1:44441 2024-12-01T18:17:59,495 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:17:59,495 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:17:59,495 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab/dfs/data/data3/current/BP-569714203-172.17.0.2-1733077016602 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:17:59,495 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab/dfs/data/data4/current/BP-569714203-172.17.0.2-1733077016602 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:17:59,496 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:17:59,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73a57b63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:17:59,498 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7158c214{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:17:59,498 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:17:59,498 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d38e2e3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:17:59,498 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@676eacdf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/hadoop.log.dir/,STOPPED} 2024-12-01T18:17:59,499 WARN [BP-569714203-172.17.0.2-1733077016602 heartbeating to localhost/127.0.0.1:44441 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:17:59,499 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:17:59,499 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:17:59,499 WARN [BP-569714203-172.17.0.2-1733077016602 heartbeating to localhost/127.0.0.1:44441 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-569714203-172.17.0.2-1733077016602 (Datanode Uuid 87bbe3d1-282f-4a3e-9baf-4faf058f58df) service to localhost/127.0.0.1:44441 2024-12-01T18:17:59,500 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab/dfs/data/data1/current/BP-569714203-172.17.0.2-1733077016602 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:17:59,500 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/cluster_ce066afb-d063-e204-6b1a-430c3fce4fab/dfs/data/data2/current/BP-569714203-172.17.0.2-1733077016602 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:17:59,500 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:17:59,506 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4bb512f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:17:59,506 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ccc3bba{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:17:59,507 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:17:59,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@233e6490{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:17:59,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cc661a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/hadoop.log.dir/,STOPPED} 2024-12-01T18:17:59,513 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-01T18:17:59,541 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-01T18:17:59,549 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=110 (was 100) - Thread LEAK? -, OpenFileDescriptor=466 (was 444) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=114 (was 131), ProcessCount=11 (was 11), AvailableMemoryMB=2881 (was 3192) 2024-12-01T18:17:59,556 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=111, OpenFileDescriptor=466, MaxFileDescriptor=1048576, SystemLoadAverage=114, ProcessCount=11, AvailableMemoryMB=2881 2024-12-01T18:17:59,556 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T18:17:59,556 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/hadoop.log.dir so I do NOT create it in target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d 2024-12-01T18:17:59,556 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f2489126-99ff-4fd0-988b-7c2b378e2b34/hadoop.tmp.dir so I do NOT create it in target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147, deleteOnExit=true 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/test.cache.data in system properties and HBase conf 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/hadoop.log.dir in system properties and HBase conf 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-01T18:17:59,557 DEBUG [Time-limited test {}] fs.HFileSystem(310): The 
file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:17:59,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T18:17:59,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T18:17:59,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:17:59,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:17:59,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T18:17:59,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/nfs.dump.dir in system properties and HBase conf 2024-12-01T18:17:59,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/java.io.tmpdir in system properties and HBase conf 2024-12-01T18:17:59,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:17:59,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T18:17:59,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T18:17:59,571 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:17:59,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:17:59,633 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:17:59,637 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:17:59,638 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:17:59,638 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:17:59,639 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:17:59,639 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:17:59,640 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1297c717{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:17:59,640 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7cad81e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:17:59,651 INFO [regionserver/b8365d49b74c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:17:59,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@34cb6972{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/java.io.tmpdir/jetty-localhost-43539-hadoop-hdfs-3_4_1-tests_jar-_-any-8071212948673226906/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:17:59,759 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@191bd0c3{HTTP/1.1, (http/1.1)}{localhost:43539} 2024-12-01T18:17:59,759 INFO [Time-limited test {}] server.Server(415): Started @287053ms 2024-12-01T18:17:59,772 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:17:59,823 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:17:59,828 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:17:59,828 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:17:59,828 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:17:59,828 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:17:59,829 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@555a9157{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:17:59,829 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bf6e2c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:17:59,943 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4d11ec04{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/java.io.tmpdir/jetty-localhost-34255-hadoop-hdfs-3_4_1-tests_jar-_-any-4732302895672766795/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:17:59,944 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17c78fce{HTTP/1.1, (http/1.1)}{localhost:34255} 2024-12-01T18:17:59,944 INFO [Time-limited test {}] server.Server(415): Started @287237ms 2024-12-01T18:17:59,945 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:17:59,974 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:17:59,977 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:17:59,978 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:17:59,978 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:17:59,978 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:17:59,978 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33dd8821{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:17:59,978 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4be9feb9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:18:00,029 WARN [Thread-1692 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147/dfs/data/data1/current/BP-487273436-172.17.0.2-1733077079579/current, will proceed with Du for space computation calculation, 2024-12-01T18:18:00,029 WARN [Thread-1693 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147/dfs/data/data2/current/BP-487273436-172.17.0.2-1733077079579/current, will proceed with Du for space computation calculation, 2024-12-01T18:18:00,048 WARN [Thread-1671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:18:00,051 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55594f509f768855 with lease ID 0xe40c18944f921818: Processing first storage report for DS-91a51110-2bf5-4396-97c2-c45a89b30d00 from datanode DatanodeRegistration(127.0.0.1:41531, datanodeUuid=4b59fa24-905f-4b4f-95e8-423bc64e71a5, infoPort=46197, infoSecurePort=0, ipcPort=40219, storageInfo=lv=-57;cid=testClusterID;nsid=1286982908;c=1733077079579) 2024-12-01T18:18:00,051 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55594f509f768855 with lease ID 0xe40c18944f921818: from storage DS-91a51110-2bf5-4396-97c2-c45a89b30d00 node DatanodeRegistration(127.0.0.1:41531, datanodeUuid=4b59fa24-905f-4b4f-95e8-423bc64e71a5, infoPort=46197, infoSecurePort=0, ipcPort=40219, storageInfo=lv=-57;cid=testClusterID;nsid=1286982908;c=1733077079579), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:18:00,051 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55594f509f768855 with lease ID 0xe40c18944f921818: Processing first storage report for DS-b2ca5a9b-fe73-415a-baf4-d1e32373ed62 from datanode DatanodeRegistration(127.0.0.1:41531, datanodeUuid=4b59fa24-905f-4b4f-95e8-423bc64e71a5, infoPort=46197, infoSecurePort=0, ipcPort=40219, storageInfo=lv=-57;cid=testClusterID;nsid=1286982908;c=1733077079579) 2024-12-01T18:18:00,051 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55594f509f768855 with lease ID 0xe40c18944f921818: from storage DS-b2ca5a9b-fe73-415a-baf4-d1e32373ed62 node DatanodeRegistration(127.0.0.1:41531, datanodeUuid=4b59fa24-905f-4b4f-95e8-423bc64e71a5, infoPort=46197, infoSecurePort=0, ipcPort=40219, storageInfo=lv=-57;cid=testClusterID;nsid=1286982908;c=1733077079579), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:18:00,094 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@434b09e4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/java.io.tmpdir/jetty-localhost-41691-hadoop-hdfs-3_4_1-tests_jar-_-any-2271183491045018217/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:18:00,094 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17b5bb29{HTTP/1.1, (http/1.1)}{localhost:41691} 2024-12-01T18:18:00,094 INFO [Time-limited test {}] server.Server(415): Started @287388ms 2024-12-01T18:18:00,095 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
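The minicluster being started here (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, per the StartMiniClusterOption logged above) is typically brought up in test code roughly as follows. This is an illustrative sketch of the HBaseTestingUtility API named in the log, not the actual TestLogRolling source:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // starts mini DFS, mini ZK and HBase, as logged above
        try {
          // ... run test logic against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" line seen earlier
        }
      }
    }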
2024-12-01T18:18:00,177 WARN [Thread-1718 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147/dfs/data/data3/current/BP-487273436-172.17.0.2-1733077079579/current, will proceed with Du for space computation calculation, 2024-12-01T18:18:00,177 WARN [Thread-1719 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147/dfs/data/data4/current/BP-487273436-172.17.0.2-1733077079579/current, will proceed with Du for space computation calculation, 2024-12-01T18:18:00,200 WARN [Thread-1707 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:18:00,202 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd54c0e9e9ab900cb with lease ID 0xe40c18944f921819: Processing first storage report for DS-62c09e99-8605-4b65-9030-9af7ae16aaba from datanode DatanodeRegistration(127.0.0.1:45821, datanodeUuid=5b572541-fb21-4c36-919e-521eec6cafa6, infoPort=38401, infoSecurePort=0, ipcPort=42309, storageInfo=lv=-57;cid=testClusterID;nsid=1286982908;c=1733077079579) 2024-12-01T18:18:00,202 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd54c0e9e9ab900cb with lease ID 0xe40c18944f921819: from storage DS-62c09e99-8605-4b65-9030-9af7ae16aaba node DatanodeRegistration(127.0.0.1:45821, datanodeUuid=5b572541-fb21-4c36-919e-521eec6cafa6, infoPort=38401, infoSecurePort=0, ipcPort=42309, storageInfo=lv=-57;cid=testClusterID;nsid=1286982908;c=1733077079579), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:18:00,202 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd54c0e9e9ab900cb with lease ID 0xe40c18944f921819: Processing first storage report for DS-e830491b-31d8-469c-abe9-5f1e3dd1d94e from datanode DatanodeRegistration(127.0.0.1:45821, datanodeUuid=5b572541-fb21-4c36-919e-521eec6cafa6, infoPort=38401, infoSecurePort=0, ipcPort=42309, storageInfo=lv=-57;cid=testClusterID;nsid=1286982908;c=1733077079579) 2024-12-01T18:18:00,202 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd54c0e9e9ab900cb with lease ID 0xe40c18944f921819: from storage DS-e830491b-31d8-469c-abe9-5f1e3dd1d94e node DatanodeRegistration(127.0.0.1:45821, datanodeUuid=5b572541-fb21-4c36-919e-521eec6cafa6, infoPort=38401, infoSecurePort=0, ipcPort=42309, storageInfo=lv=-57;cid=testClusterID;nsid=1286982908;c=1733077079579), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:18:00,220 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d 2024-12-01T18:18:00,223 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147/zookeeper_0, clientPort=52110, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T18:18:00,224 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=52110 2024-12-01T18:18:00,224 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:18:00,226 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:18:00,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:18:00,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:18:00,236 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171 with version=8 2024-12-01T18:18:00,236 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/hbase-staging 2024-12-01T18:18:00,239 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:18:00,239 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:18:00,239 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:18:00,239 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:18:00,239 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:18:00,239 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:18:00,239 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:18:00,239 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:18:00,240 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33181 2024-12-01T18:18:00,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:18:00,243 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:18:00,246 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33181 connecting to ZooKeeper ensemble=127.0.0.1:52110 2024-12-01T18:18:00,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:331810x0, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:18:00,252 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33181-0x1004ecd51850000 connected 2024-12-01T18:18:00,266 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:18:00,267 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:18:00,267 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:18:00,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33181 2024-12-01T18:18:00,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33181 2024-12-01T18:18:00,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33181 2024-12-01T18:18:00,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33181 2024-12-01T18:18:00,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33181 2024-12-01T18:18:00,269 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171, hbase.cluster.distributed=false 2024-12-01T18:18:00,285 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:18:00,285 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:18:00,285 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:18:00,285 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:18:00,285 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:18:00,285 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:18:00,285 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:18:00,285 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:18:00,286 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41383 2024-12-01T18:18:00,286 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:18:00,287 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:18:00,288 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:18:00,289 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:18:00,292 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41383 connecting to ZooKeeper ensemble=127.0.0.1:52110 2024-12-01T18:18:00,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413830x0, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:18:00,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41383-0x1004ecd51850001 connected 2024-12-01T18:18:00,295 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:18:00,295 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:18:00,296 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:18:00,296 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41383 2024-12-01T18:18:00,296 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41383 2024-12-01T18:18:00,297 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41383 2024-12-01T18:18:00,297 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 
with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41383 2024-12-01T18:18:00,297 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41383 2024-12-01T18:18:00,298 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b8365d49b74c,33181,1733077080238 2024-12-01T18:18:00,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:18:00,300 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b8365d49b74c,33181,1733077080238 2024-12-01T18:18:00,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:18:00,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,302 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:18:00,302 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b8365d49b74c,33181,1733077080238 from backup master directory 2024-12-01T18:18:00,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b8365d49b74c,33181,1733077080238 2024-12-01T18:18:00,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:18:00,303 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
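From the MiniZooKeeperCluster start on clientPort=52110 through the master:33181 and regionserver:41383 RPC servers and their ZooKeeper registrations, this bring-up is orchestrated by HBaseTestingUtility, which the log names directly. A hedged sketch of the kind of driver code that produces it, assuming the standard 2.x test API (the table name, column family and Put below are illustrative, not taken from the log):

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtility util = new HBaseTestingUtility();
            // Starts embedded ZooKeeper and HDFS, then one master and one regionserver.
            util.startMiniCluster();
            try (Table table = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("info"))) {
                Put put = new Put(Bytes.toBytes("row1"));
                put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
                table.put(put);
            }
            util.shutdownMiniCluster();
        }
    }

startMiniCluster() brings up the embedded ZooKeeper and HDFS before the HBase master and regionserver, which is consistent with the ordering seen in the log above.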
2024-12-01T18:18:00,303 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:18:00,303 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b8365d49b74c,33181,1733077080238 2024-12-01T18:18:00,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:18:00,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:18:00,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,316 DEBUG [M:0;b8365d49b74c:33181 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b8365d49b74c:33181 2024-12-01T18:18:00,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:18:00,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:18:00,318 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/hbase.id with ID: df78b302-548a-464c-b0fe-99e3d5cee5e6 2024-12-01T18:18:00,329 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:18:00,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:18:00,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:18:00,341 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE 
=> '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:18:00,342 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T18:18:00,342 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:18:00,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:18:00,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:18:00,355 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store 2024-12-01T18:18:00,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45821 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:18:00,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:18:00,364 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:18:00,364 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:18:00,364 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:18:00,364 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:18:00,364 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:18:00,364 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:18:00,364 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:18:00,364 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:18:00,365 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/.initializing 2024-12-01T18:18:00,365 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/WALs/b8365d49b74c,33181,1733077080238 2024-12-01T18:18:00,368 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C33181%2C1733077080238, suffix=, logDir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/WALs/b8365d49b74c,33181,1733077080238, archiveDir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/oldWALs, maxLogs=10 2024-12-01T18:18:00,368 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C33181%2C1733077080238.1733077080368 2024-12-01T18:18:00,376 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/WALs/b8365d49b74c,33181,1733077080238/b8365d49b74c%2C33181%2C1733077080238.1733077080368 2024-12-01T18:18:00,376 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46197:46197),(127.0.0.1/127.0.0.1:38401:38401)] 2024-12-01T18:18:00,376 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED 
=> 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:18:00,376 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:18:00,376 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:18:00,376 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:18:00,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:18:00,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T18:18:00,380 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:00,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:18:00,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:18:00,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T18:18:00,382 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:00,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:18:00,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:18:00,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T18:18:00,384 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:00,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:18:00,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:18:00,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T18:18:00,386 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:00,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:18:00,387 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:18:00,387 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:18:00,389 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T18:18:00,390 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:18:00,393 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:18:00,394 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778083, jitterRate=-0.010616615414619446}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:18:00,395 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:18:00,395 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T18:18:00,400 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2850e5e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:18:00,401 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
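The long descriptor dumps above ('master:store' with its info/proc/rs/state families and their BLOCKSIZE, BLOOMFILTER, DATA_BLOCK_ENCODING and IN_MEMORY settings) are toString output of HBase table and column-family descriptors. For orientation, the 'info' family's attributes map onto the 2.x builder API roughly as in the sketch below; the table name is hypothetical and only the attribute values are taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static void main(String[] args) {
            // Mirrors the 'info' family printed above: VERSIONS => 3, DATA_BLOCK_ENCODING => ROW_INDEX_V1,
            // BLOOMFILTER => ROWCOL, IN_MEMORY => true, BLOCKSIZE => 8192.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))    // hypothetical table name
                .setColumnFamily(info)
                .build();
            System.out.println(td);
        }
    }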
2024-12-01T18:18:00,401 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T18:18:00,401 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T18:18:00,401 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T18:18:00,402 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-01T18:18:00,402 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-01T18:18:00,402 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T18:18:00,404 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-01T18:18:00,405 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T18:18:00,406 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-01T18:18:00,407 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T18:18:00,407 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T18:18:00,409 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-01T18:18:00,409 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T18:18:00,410 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T18:18:00,412 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-01T18:18:00,412 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T18:18:00,414 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T18:18:00,415 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T18:18:00,416 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T18:18:00,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:18:00,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:18:00,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,418 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b8365d49b74c,33181,1733077080238, sessionid=0x1004ecd51850000, setting cluster-up flag (Was=false) 2024-12-01T18:18:00,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,428 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T18:18:00,429 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,33181,1733077080238 2024-12-01T18:18:00,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,437 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T18:18:00,438 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,33181,1733077080238 2024-12-01T18:18:00,440 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-12-01T18:18:00,441 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-01T18:18:00,441 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-01T18:18:00,441 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b8365d49b74c,33181,1733077080238 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T18:18:00,441 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:18:00,441 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:18:00,441 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:18:00,441 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:18:00,442 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b8365d49b74c:0, corePoolSize=10, maxPoolSize=10 2024-12-01T18:18:00,442 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,442 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:18:00,442 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,443 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733077110442 2024-12-01T18:18:00,443 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T18:18:00,443 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T18:18:00,443 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] 
cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T18:18:00,443 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:18:00,443 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T18:18:00,443 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-01T18:18:00,443 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T18:18:00,443 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T18:18:00,443 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:00,444 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T18:18:00,444 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T18:18:00,444 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T18:18:00,444 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:00,444 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:18:00,444 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T18:18:00,444 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T18:18:00,444 DEBUG 
[master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733077080444,5,FailOnTimeoutGroup] 2024-12-01T18:18:00,445 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733077080445,5,FailOnTimeoutGroup] 2024-12-01T18:18:00,445 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:00,445 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T18:18:00,445 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:00,445 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:00,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:18:00,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:18:00,455 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-01T18:18:00,455 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171 2024-12-01T18:18:00,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:18:00,462 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:18:00,463 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:18:00,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:18:00,465 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:18:00,465 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:00,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:18:00,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:18:00,467 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:18:00,467 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:00,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:18:00,467 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:18:00,468 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:18:00,468 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:00,469 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:18:00,469 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740 2024-12-01T18:18:00,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740 2024-12-01T18:18:00,471 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-01T18:18:00,472 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:18:00,473 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:18:00,474 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716007, jitterRate=-0.08955115079879761}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:18:00,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:18:00,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:18:00,474 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:18:00,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:18:00,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:18:00,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:18:00,474 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-01T18:18:00,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:18:00,475 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:18:00,475 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-01T18:18:00,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T18:18:00,477 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T18:18:00,477 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T18:18:00,510 DEBUG [RS:0;b8365d49b74c:41383 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b8365d49b74c:41383 2024-12-01T18:18:00,511 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1008): ClusterId : df78b302-548a-464c-b0fe-99e3d5cee5e6 2024-12-01T18:18:00,511 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:18:00,513 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:18:00,513 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:18:00,515 DEBUG 
[RS:0;b8365d49b74c:41383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:18:00,516 DEBUG [RS:0;b8365d49b74c:41383 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33ff83d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:18:00,516 DEBUG [RS:0;b8365d49b74c:41383 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@476c8fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:18:00,516 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-01T18:18:00,516 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-01T18:18:00,516 DEBUG [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-01T18:18:00,517 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(3073): reportForDuty to master=b8365d49b74c,33181,1733077080238 with isa=b8365d49b74c/172.17.0.2:41383, startcode=1733077080285 2024-12-01T18:18:00,517 DEBUG [RS:0;b8365d49b74c:41383 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:18:00,519 INFO [RS-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47995, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:18:00,519 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33181 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b8365d49b74c,41383,1733077080285 2024-12-01T18:18:00,519 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33181 {}] master.ServerManager(486): Registering regionserver=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:00,520 DEBUG [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171 2024-12-01T18:18:00,520 DEBUG [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:37907 2024-12-01T18:18:00,520 DEBUG [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-01T18:18:00,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:18:00,522 DEBUG [RS:0;b8365d49b74c:41383 {}] zookeeper.ZKUtil(111): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b8365d49b74c,41383,1733077080285 2024-12-01T18:18:00,522 WARN [RS:0;b8365d49b74c:41383 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T18:18:00,522 INFO [RS:0;b8365d49b74c:41383 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:18:00,522 DEBUG [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285 2024-12-01T18:18:00,523 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b8365d49b74c,41383,1733077080285] 2024-12-01T18:18:00,526 DEBUG [RS:0;b8365d49b74c:41383 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-01T18:18:00,526 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:18:00,527 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:18:00,528 INFO [RS:0;b8365d49b74c:41383 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:18:00,528 INFO [RS:0;b8365d49b74c:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:00,528 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-01T18:18:00,529 INFO [RS:0;b8365d49b74c:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:18:00,529 DEBUG [RS:0;b8365d49b74c:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:18:00,529 INFO [RS:0;b8365d49b74c:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:00,530 INFO [RS:0;b8365d49b74c:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:00,530 INFO [RS:0;b8365d49b74c:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:00,530 INFO [RS:0;b8365d49b74c:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:00,530 INFO [RS:0;b8365d49b74c:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41383,1733077080285-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-01T18:18:00,545 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:18:00,545 INFO [RS:0;b8365d49b74c:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,41383,1733077080285-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:00,559 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.Replication(204): b8365d49b74c,41383,1733077080285 started 2024-12-01T18:18:00,559 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1767): Serving as b8365d49b74c,41383,1733077080285, RpcServer on b8365d49b74c/172.17.0.2:41383, sessionid=0x1004ecd51850001 2024-12-01T18:18:00,559 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:18:00,559 DEBUG [RS:0;b8365d49b74c:41383 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b8365d49b74c,41383,1733077080285 2024-12-01T18:18:00,559 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,41383,1733077080285' 2024-12-01T18:18:00,559 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:18:00,560 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:18:00,560 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:18:00,560 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:18:00,560 DEBUG [RS:0;b8365d49b74c:41383 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b8365d49b74c,41383,1733077080285 2024-12-01T18:18:00,560 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,41383,1733077080285' 2024-12-01T18:18:00,560 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:18:00,560 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:18:00,561 DEBUG [RS:0;b8365d49b74c:41383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:18:00,561 INFO [RS:0;b8365d49b74c:41383 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:18:00,561 INFO [RS:0;b8365d49b74c:41383 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T18:18:00,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:00,628 WARN [b8365d49b74c:33181 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-12-01T18:18:00,662 INFO [RS:0;b8365d49b74c:41383 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C41383%2C1733077080285, suffix=, logDir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285, archiveDir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/oldWALs, maxLogs=32 2024-12-01T18:18:00,663 INFO [RS:0;b8365d49b74c:41383 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41383%2C1733077080285.1733077080663 2024-12-01T18:18:00,669 INFO [RS:0;b8365d49b74c:41383 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285/b8365d49b74c%2C41383%2C1733077080285.1733077080663 2024-12-01T18:18:00,669 DEBUG [RS:0;b8365d49b74c:41383 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38401:38401),(127.0.0.1/127.0.0.1:46197:46197)] 2024-12-01T18:18:00,878 DEBUG [b8365d49b74c:33181 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-01T18:18:00,878 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:00,879 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,41383,1733077080285, state=OPENING 2024-12-01T18:18:00,881 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T18:18:00,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, 
quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:00,883 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b8365d49b74c,41383,1733077080285}] 2024-12-01T18:18:00,883 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:18:00,883 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:18:01,035 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41383,1733077080285 2024-12-01T18:18:01,035 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T18:18:01,037 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51092, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T18:18:01,041 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-01T18:18:01,041 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:18:01,043 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C41383%2C1733077080285.meta, suffix=.meta, logDir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285, archiveDir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/oldWALs, maxLogs=32 2024-12-01T18:18:01,044 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41383%2C1733077080285.meta.1733077081044.meta 2024-12-01T18:18:01,053 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285/b8365d49b74c%2C41383%2C1733077080285.meta.1733077081044.meta 2024-12-01T18:18:01,053 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38401:38401),(127.0.0.1/127.0.0.1:46197:46197)] 2024-12-01T18:18:01,053 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:18:01,053 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null 
and priority 536870911 2024-12-01T18:18:01,053 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T18:18:01,053 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-01T18:18:01,054 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T18:18:01,054 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:18:01,054 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-01T18:18:01,054 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-01T18:18:01,055 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:18:01,056 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:18:01,056 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:01,056 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:18:01,056 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:18:01,057 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:18:01,057 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:01,057 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:18:01,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:18:01,058 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:18:01,058 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:01,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:18:01,059 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740 2024-12-01T18:18:01,060 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740 2024-12-01T18:18:01,061 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-01T18:18:01,063 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:18:01,063 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810811, jitterRate=0.031000077724456787}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:18:01,064 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:18:01,064 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733077081035 2024-12-01T18:18:01,066 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T18:18:01,066 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-01T18:18:01,066 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:01,067 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,41383,1733077080285, state=OPEN 2024-12-01T18:18:01,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:18:01,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:18:01,072 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:18:01,072 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:18:01,074 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T18:18:01,074 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=b8365d49b74c,41383,1733077080285 in 189 msec 2024-12-01T18:18:01,075 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T18:18:01,075 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 598 msec 2024-12-01T18:18:01,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 637 msec 2024-12-01T18:18:01,077 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733077081077, completionTime=-1 2024-12-01T18:18:01,077 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-01T18:18:01,077 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-01T18:18:01,078 DEBUG [hconnection-0x554a0334-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:18:01,079 INFO [RS-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51106, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:18:01,080 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-01T18:18:01,080 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733077141080 2024-12-01T18:18:01,080 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733077201080 2024-12-01T18:18:01,080 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-01T18:18:01,086 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,33181,1733077080238-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:01,086 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,33181,1733077080238-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:01,086 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,33181,1733077080238-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:01,086 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b8365d49b74c:33181, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:01,086 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T18:18:01,086 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-01T18:18:01,087 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:18:01,088 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-01T18:18:01,088 DEBUG [master/b8365d49b74c:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-01T18:18:01,089 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:18:01,089 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:01,090 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:18:01,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:18:01,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:18:01,098 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ef4cc29d327f6d110395be9a9161ded7, NAME => 'hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171 2024-12-01T18:18:01,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:18:01,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:18:01,105 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:18:01,105 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing ef4cc29d327f6d110395be9a9161ded7, disabling compactions & flushes 2024-12-01T18:18:01,105 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 2024-12-01T18:18:01,105 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 2024-12-01T18:18:01,105 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. after waiting 0 ms 2024-12-01T18:18:01,105 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 2024-12-01T18:18:01,105 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 2024-12-01T18:18:01,105 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for ef4cc29d327f6d110395be9a9161ded7: 2024-12-01T18:18:01,106 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:18:01,106 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733077081106"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733077081106"}]},"ts":"1733077081106"} 2024-12-01T18:18:01,108 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-01T18:18:01,109 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:18:01,109 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733077081109"}]},"ts":"1733077081109"} 2024-12-01T18:18:01,110 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-01T18:18:01,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ef4cc29d327f6d110395be9a9161ded7, ASSIGN}] 2024-12-01T18:18:01,115 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ef4cc29d327f6d110395be9a9161ded7, ASSIGN 2024-12-01T18:18:01,116 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=ef4cc29d327f6d110395be9a9161ded7, ASSIGN; state=OFFLINE, location=b8365d49b74c,41383,1733077080285; forceNewPlan=false, retain=false 2024-12-01T18:18:01,267 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ef4cc29d327f6d110395be9a9161ded7, regionState=OPENING, regionLocation=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:01,269 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure ef4cc29d327f6d110395be9a9161ded7, server=b8365d49b74c,41383,1733077080285}] 2024-12-01T18:18:01,421 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41383,1733077080285 2024-12-01T18:18:01,424 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 2024-12-01T18:18:01,424 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => ef4cc29d327f6d110395be9a9161ded7, NAME => 'hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:18:01,425 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace ef4cc29d327f6d110395be9a9161ded7 2024-12-01T18:18:01,425 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:18:01,425 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for ef4cc29d327f6d110395be9a9161ded7 2024-12-01T18:18:01,425 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for ef4cc29d327f6d110395be9a9161ded7 2024-12-01T18:18:01,426 INFO [StoreOpener-ef4cc29d327f6d110395be9a9161ded7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ef4cc29d327f6d110395be9a9161ded7 2024-12-01T18:18:01,427 INFO [StoreOpener-ef4cc29d327f6d110395be9a9161ded7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef4cc29d327f6d110395be9a9161ded7 columnFamilyName info 2024-12-01T18:18:01,427 DEBUG [StoreOpener-ef4cc29d327f6d110395be9a9161ded7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:01,428 INFO [StoreOpener-ef4cc29d327f6d110395be9a9161ded7-1 {}] regionserver.HStore(327): Store=ef4cc29d327f6d110395be9a9161ded7/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:18:01,428 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/namespace/ef4cc29d327f6d110395be9a9161ded7 2024-12-01T18:18:01,429 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/namespace/ef4cc29d327f6d110395be9a9161ded7 2024-12-01T18:18:01,431 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for ef4cc29d327f6d110395be9a9161ded7 2024-12-01T18:18:01,432 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/namespace/ef4cc29d327f6d110395be9a9161ded7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:18:01,433 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened ef4cc29d327f6d110395be9a9161ded7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=855623, jitterRate=0.0879817008972168}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:18:01,433 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for ef4cc29d327f6d110395be9a9161ded7: 2024-12-01T18:18:01,434 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7., pid=6, masterSystemTime=1733077081421 2024-12-01T18:18:01,435 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 2024-12-01T18:18:01,435 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 
2024-12-01T18:18:01,436 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ef4cc29d327f6d110395be9a9161ded7, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:01,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T18:18:01,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure ef4cc29d327f6d110395be9a9161ded7, server=b8365d49b74c,41383,1733077080285 in 169 msec 2024-12-01T18:18:01,441 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T18:18:01,441 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=ef4cc29d327f6d110395be9a9161ded7, ASSIGN in 325 msec 2024-12-01T18:18:01,442 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:18:01,442 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733077081442"}]},"ts":"1733077081442"} 2024-12-01T18:18:01,443 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-01T18:18:01,446 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:18:01,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 359 msec 2024-12-01T18:18:01,489 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-01T18:18:01,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:18:01,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:01,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:18:01,494 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-01T18:18:01,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:18:01,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 10 msec 2024-12-01T18:18:01,506 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-01T18:18:01,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:18:01,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 8 msec 2024-12-01T18:18:01,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-01T18:18:01,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-01T18:18:01,522 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.218sec 2024-12-01T18:18:01,522 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T18:18:01,522 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T18:18:01,522 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T18:18:01,522 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-01T18:18:01,522 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T18:18:01,522 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,33181,1733077080238-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:18:01,522 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,33181,1733077080238-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T18:18:01,524 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-01T18:18:01,524 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T18:18:01,524 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,33181,1733077080238-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T18:18:01,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:01,600 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bfbd5bb to 127.0.0.1:52110 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5de2049e 2024-12-01T18:18:01,604 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@620fc06d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:18:01,606 DEBUG [hconnection-0x12e1f3d6-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:18:01,607 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:18:01,608 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b8365d49b74c,33181,1733077080238 2024-12-01T18:18:01,609 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:18:01,611 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-01T18:18:01,611 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T18:18:01,613 INFO [RS-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36708, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T18:18:01,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-01T18:18:01,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
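The two TableDescriptorChecker warnings above flag MAX_FILESIZE (hbase.hregion.max.filesize = 786432 bytes) and MEMSTORE_FLUSHSIZE (hbase.hregion.memstore.flush.size = 8192 bytes) as unusually small. A minimal sketch of how such values could be set on a test Configuration is shown below; the class name and call site are illustrative assumptions, not the actual TestLogRolling setup, which is not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical helper: shrink the region split size and memstore flush
// threshold so flushes, splits and WAL rolls happen quickly in a test.
// The two values are taken from the TableDescriptorChecker warnings above.
public class TinyRegionConfSketch {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.max.filesize", 786432L);      // 768 KB region split size
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB memstore flush size
    return conf;
  }
}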
2024-12-01T18:18:01,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:18:01,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-01T18:18:01,616 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:18:01,616 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:01,616 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 9 2024-12-01T18:18:01,617 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:18:01,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-01T18:18:01,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741837_1013 (size=381) 2024-12-01T18:18:01,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741837_1013 (size=381) 2024-12-01T18:18:01,625 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b658ffcf569fa9b7017ef7c6129a394c, NAME => 'TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171 2024-12-01T18:18:01,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741838_1014 (size=64) 2024-12-01T18:18:01,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741838_1014 (size=64) 2024-12-01T18:18:01,631 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(894): Instantiated 
TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:18:01,631 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing b658ffcf569fa9b7017ef7c6129a394c, disabling compactions & flushes 2024-12-01T18:18:01,631 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 2024-12-01T18:18:01,631 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 2024-12-01T18:18:01,631 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. after waiting 0 ms 2024-12-01T18:18:01,631 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 2024-12-01T18:18:01,631 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 2024-12-01T18:18:01,631 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for b658ffcf569fa9b7017ef7c6129a394c: 2024-12-01T18:18:01,632 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:18:01,633 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733077081632"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733077081632"}]},"ts":"1733077081632"} 2024-12-01T18:18:01,634 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
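The create request logged by HMaster$4 above uses a shell-style descriptor ({NAME => 'info', VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536', COMPRESSION => 'NONE', BLOCKCACHE => 'true', ...}). A rough Java-client equivalent is sketched below, assuming an already-configured connection; it only mirrors the descriptor seen in the log and is not the test's actual call site.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // One 'info' family, VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=65536,
      // no compression, block cache enabled -- as in the logged descriptor.
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .setBlocksize(65536)
          .setCompressionType(Compression.Algorithm.NONE)
          .setBlockCacheEnabled(true)
          .build());
      admin.createTable(table.build());
    }
  }
}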
2024-12-01T18:18:01,635 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:18:01,635 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733077081635"}]},"ts":"1733077081635"} 2024-12-01T18:18:01,636 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-01T18:18:01,640 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b658ffcf569fa9b7017ef7c6129a394c, ASSIGN}] 2024-12-01T18:18:01,641 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b658ffcf569fa9b7017ef7c6129a394c, ASSIGN 2024-12-01T18:18:01,642 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b658ffcf569fa9b7017ef7c6129a394c, ASSIGN; state=OFFLINE, location=b8365d49b74c,41383,1733077080285; forceNewPlan=false, retain=false 2024-12-01T18:18:01,792 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=b658ffcf569fa9b7017ef7c6129a394c, regionState=OPENING, regionLocation=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:01,794 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure b658ffcf569fa9b7017ef7c6129a394c, server=b8365d49b74c,41383,1733077080285}] 2024-12-01T18:18:01,946 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41383,1733077080285 2024-12-01T18:18:01,950 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 
2024-12-01T18:18:01,950 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => b658ffcf569fa9b7017ef7c6129a394c, NAME => 'TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:18:01,950 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:01,950 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:18:01,950 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:01,950 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:01,951 INFO [StoreOpener-b658ffcf569fa9b7017ef7c6129a394c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:01,953 INFO [StoreOpener-b658ffcf569fa9b7017ef7c6129a394c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b658ffcf569fa9b7017ef7c6129a394c columnFamilyName info 2024-12-01T18:18:01,953 DEBUG [StoreOpener-b658ffcf569fa9b7017ef7c6129a394c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:01,953 INFO [StoreOpener-b658ffcf569fa9b7017ef7c6129a394c-1 {}] regionserver.HStore(327): Store=b658ffcf569fa9b7017ef7c6129a394c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:18:01,954 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:01,954 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered 
edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:01,956 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:01,958 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:18:01,958 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened b658ffcf569fa9b7017ef7c6129a394c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700031, jitterRate=-0.10986477136611938}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:18:01,959 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for b658ffcf569fa9b7017ef7c6129a394c: 2024-12-01T18:18:01,959 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c., pid=11, masterSystemTime=1733077081946 2024-12-01T18:18:01,961 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 2024-12-01T18:18:01,961 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 
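The desiredMaxFileSize=700031 and jitterRate=-0.10986477136611938 reported when the region opened appear consistent with the split policy applying the logged jitter to the configured hbase.hregion.max.filesize of 786432 bytes (the value flagged by TableDescriptorChecker earlier): adding the product 786432 * (-0.10986477...), truncated toward zero, to 786432 gives 700031. The short check below reproduces that arithmetic; the formula is inferred from the logged values and should be treated as an assumption rather than a statement of the split policy's exact code.

public class SplitSizeJitterCheck {
  public static void main(String[] args) {
    // Values copied from the log: the configured max file size warned about by
    // TableDescriptorChecker, and the jitterRate printed in the region-open line.
    long configuredMaxFileSize = 786432L;
    double jitterRate = -0.10986477136611938;
    // Truncating the jittered delta toward zero and adding it back reproduces
    // the logged desiredMaxFileSize.
    long desired = configuredMaxFileSize + (long) (configuredMaxFileSize * jitterRate);
    System.out.println(desired); // prints 700031
  }
}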
2024-12-01T18:18:01,962 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=b658ffcf569fa9b7017ef7c6129a394c, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:01,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-01T18:18:01,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure b658ffcf569fa9b7017ef7c6129a394c, server=b8365d49b74c,41383,1733077080285 in 169 msec 2024-12-01T18:18:01,967 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-01T18:18:01,967 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b658ffcf569fa9b7017ef7c6129a394c, ASSIGN in 325 msec 2024-12-01T18:18:01,967 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:18:01,967 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733077081967"}]},"ts":"1733077081967"} 2024-12-01T18:18:01,968 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-01T18:18:01,971 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:18:01,972 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRolling in 357 msec 2024-12-01T18:18:02,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:03,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:03,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:03,907 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,412 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T18:18:04,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:04,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:05,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:06,526 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-01T18:18:06,527 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-01T18:18:06,527 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-01T18:18:06,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:07,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:08,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:08,601 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-01T18:18:08,602 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-01T18:18:08,602 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-01T18:18:09,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:10,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:11,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:11,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-01T18:18:11,618 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling, procId: 9 completed 2024-12-01T18:18:11,621 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-01T18:18:11,621 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 2024-12-01T18:18:11,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:11,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b658ffcf569fa9b7017ef7c6129a394c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:18:11,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/a0ceaddb92a64fddba24446009ab99e7 is 1080, key is row0001/info:/1733077091624/Put/seqid=0 2024-12-01T18:18:11,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741839_1015 (size=12509) 2024-12-01T18:18:11,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741839_1015 (size=12509) 2024-12-01T18:18:11,655 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/a0ceaddb92a64fddba24446009ab99e7 2024-12-01T18:18:11,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b658ffcf569fa9b7017ef7c6129a394c, server=b8365d49b74c,41383,1733077080285 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-01T18:18:11,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:51118 deadline: 1733077101656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b658ffcf569fa9b7017ef7c6129a394c, server=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:11,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/a0ceaddb92a64fddba24446009ab99e7 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/a0ceaddb92a64fddba24446009ab99e7 2024-12-01T18:18:11,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/a0ceaddb92a64fddba24446009ab99e7, entries=7, sequenceid=11, filesize=12.2 K 2024-12-01T18:18:11,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for b658ffcf569fa9b7017ef7c6129a394c in 37ms, sequenceid=11, compaction requested=false 2024-12-01T18:18:11,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b658ffcf569fa9b7017ef7c6129a394c: 2024-12-01T18:18:12,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:13,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:14,104 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T18:18:14,105 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,105 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,122 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,122 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,122 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,122 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:14,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:15,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:16,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:17,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:18,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:18,922 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=1, created chunk count=15, reused chunk count=36, reuseRatio=70.59% 2024-12-01T18:18:18,923 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-01T18:18:19,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:20,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:21,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:21,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:21,744 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b658ffcf569fa9b7017ef7c6129a394c 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-01T18:18:21,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/4d7b8c279a3940e486e870493922ed21 is 1080, key is row0008/info:/1733077091632/Put/seqid=0 2024-12-01T18:18:21,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741840_1016 (size=29761) 2024-12-01T18:18:21,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741840_1016 (size=29761) 2024-12-01T18:18:21,756 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/4d7b8c279a3940e486e870493922ed21 2024-12-01T18:18:21,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/4d7b8c279a3940e486e870493922ed21 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/4d7b8c279a3940e486e870493922ed21 2024-12-01T18:18:21,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/4d7b8c279a3940e486e870493922ed21, entries=23, sequenceid=37, filesize=29.1 K 2024-12-01T18:18:21,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for b658ffcf569fa9b7017ef7c6129a394c in 23ms, sequenceid=37, compaction requested=false 2024-12-01T18:18:21,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b658ffcf569fa9b7017ef7c6129a394c: 2024-12-01T18:18:21,768 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=41.3 K, sizeToCheck=16.0 K 2024-12-01T18:18:21,768 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:18:21,768 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/4d7b8c279a3940e486e870493922ed21 because midkey is the same as first or last row 2024-12-01T18:18:22,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): 
Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:23,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:23,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:23,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b658ffcf569fa9b7017ef7c6129a394c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:18:23,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/605881721fb84e3c9b98578d5f0872ef is 1080, key is row0031/info:/1733077101745/Put/seqid=0 2024-12-01T18:18:23,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741841_1017 (size=12509) 2024-12-01T18:18:23,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741841_1017 (size=12509) 2024-12-01T18:18:23,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/605881721fb84e3c9b98578d5f0872ef 2024-12-01T18:18:23,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/605881721fb84e3c9b98578d5f0872ef as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/605881721fb84e3c9b98578d5f0872ef 2024-12-01T18:18:23,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/605881721fb84e3c9b98578d5f0872ef, entries=7, sequenceid=47, filesize=12.2 K 2024-12-01T18:18:23,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for b658ffcf569fa9b7017ef7c6129a394c in 24ms, sequenceid=47, compaction requested=true 2024-12-01T18:18:23,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b658ffcf569fa9b7017ef7c6129a394c: 2024-12-01T18:18:23,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] 
regionserver.HRegion(8581): Flush requested on b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:23,777 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=53.5 K, sizeToCheck=16.0 K 2024-12-01T18:18:23,777 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:18:23,777 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/4d7b8c279a3940e486e870493922ed21 because midkey is the same as first or last row 2024-12-01T18:18:23,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b658ffcf569fa9b7017ef7c6129a394c:info, priority=-2147483648, current under compaction store size is 1 2024-12-01T18:18:23,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:23,778 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-01T18:18:23,778 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b658ffcf569fa9b7017ef7c6129a394c 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-12-01T18:18:23,779 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-01T18:18:23,779 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1540): b658ffcf569fa9b7017ef7c6129a394c/info is initiating minor compaction (all files) 2024-12-01T18:18:23,779 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b658ffcf569fa9b7017ef7c6129a394c/info in TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 
2024-12-01T18:18:23,779 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/a0ceaddb92a64fddba24446009ab99e7, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/4d7b8c279a3940e486e870493922ed21, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/605881721fb84e3c9b98578d5f0872ef] into tmpdir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp, totalSize=53.5 K 2024-12-01T18:18:23,780 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0ceaddb92a64fddba24446009ab99e7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733077091624 2024-12-01T18:18:23,780 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d7b8c279a3940e486e870493922ed21, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733077091632 2024-12-01T18:18:23,781 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting 605881721fb84e3c9b98578d5f0872ef, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733077101745 2024-12-01T18:18:23,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/d3927189f33645e88d7a332d8d6b6ca4 is 1080, key is row0038/info:/1733077103753/Put/seqid=0 2024-12-01T18:18:23,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741842_1018 (size=29761) 2024-12-01T18:18:23,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741842_1018 (size=29761) 2024-12-01T18:18:23,789 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/d3927189f33645e88d7a332d8d6b6ca4 2024-12-01T18:18:23,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/d3927189f33645e88d7a332d8d6b6ca4 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/d3927189f33645e88d7a332d8d6b6ca4 2024-12-01T18:18:23,798 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b658ffcf569fa9b7017ef7c6129a394c#info#compaction#42 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 
0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-01T18:18:23,799 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/0a36ba5344334572b123dfd324537763 is 1080, key is row0001/info:/1733077091624/Put/seqid=0 2024-12-01T18:18:23,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/d3927189f33645e88d7a332d8d6b6ca4, entries=23, sequenceid=73, filesize=29.1 K 2024-12-01T18:18:23,802 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=4.20 KB/4304 for b658ffcf569fa9b7017ef7c6129a394c in 24ms, sequenceid=73, compaction requested=false 2024-12-01T18:18:23,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b658ffcf569fa9b7017ef7c6129a394c: 2024-12-01T18:18:23,803 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=82.6 K, sizeToCheck=16.0 K 2024-12-01T18:18:23,803 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:18:23,803 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/4d7b8c279a3940e486e870493922ed21 because midkey is the same as first or last row 2024-12-01T18:18:23,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741843_1019 (size=44978) 2024-12-01T18:18:23,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741843_1019 (size=44978) 2024-12-01T18:18:23,818 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/0a36ba5344334572b123dfd324537763 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/0a36ba5344334572b123dfd324537763 2024-12-01T18:18:23,824 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b658ffcf569fa9b7017ef7c6129a394c/info of b658ffcf569fa9b7017ef7c6129a394c into 0a36ba5344334572b123dfd324537763(size=43.9 K), total size for store is 73.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-01T18:18:23,824 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b658ffcf569fa9b7017ef7c6129a394c: 2024-12-01T18:18:23,824 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c., storeName=b658ffcf569fa9b7017ef7c6129a394c/info, priority=13, startTime=1733077103778; duration=0sec 2024-12-01T18:18:23,824 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=73.0 K, sizeToCheck=16.0 K 2024-12-01T18:18:23,824 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:18:23,824 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/0a36ba5344334572b123dfd324537763 because midkey is the same as first or last row 2024-12-01T18:18:23,824 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:23,824 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b658ffcf569fa9b7017ef7c6129a394c:info 2024-12-01T18:18:24,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:25,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,488 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:25,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:25,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:25,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b658ffcf569fa9b7017ef7c6129a394c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:18:25,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/510b47003fe9451e900f9c514c5e9d00 is 1080, key is row0061/info:/1733077103779/Put/seqid=0 2024-12-01T18:18:25,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741844_1020 (size=12509) 2024-12-01T18:18:25,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741844_1020 (size=12509) 2024-12-01T18:18:25,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/510b47003fe9451e900f9c514c5e9d00 2024-12-01T18:18:25,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/510b47003fe9451e900f9c514c5e9d00 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/510b47003fe9451e900f9c514c5e9d00 2024-12-01T18:18:25,810 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/510b47003fe9451e900f9c514c5e9d00, entries=7, sequenceid=84, filesize=12.2 K 2024-12-01T18:18:25,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for b658ffcf569fa9b7017ef7c6129a394c in 22ms, sequenceid=84, compaction requested=true 2024-12-01T18:18:25,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b658ffcf569fa9b7017ef7c6129a394c: 2024-12-01T18:18:25,811 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=85.2 K, sizeToCheck=16.0 K 2024-12-01T18:18:25,811 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:18:25,811 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/0a36ba5344334572b123dfd324537763 because midkey is the same as first or last row 2024-12-01T18:18:25,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b658ffcf569fa9b7017ef7c6129a394c:info, priority=-2147483648, current under compaction store size is 1 
2024-12-01T18:18:25,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:25,811 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-01T18:18:25,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:25,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b658ffcf569fa9b7017ef7c6129a394c 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-12-01T18:18:25,812 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 87248 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-01T18:18:25,812 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1540): b658ffcf569fa9b7017ef7c6129a394c/info is initiating minor compaction (all files) 2024-12-01T18:18:25,812 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b658ffcf569fa9b7017ef7c6129a394c/info in TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 2024-12-01T18:18:25,812 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/0a36ba5344334572b123dfd324537763, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/d3927189f33645e88d7a332d8d6b6ca4, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/510b47003fe9451e900f9c514c5e9d00] into tmpdir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp, totalSize=85.2 K 2024-12-01T18:18:25,813 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a36ba5344334572b123dfd324537763, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733077091624 2024-12-01T18:18:25,813 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3927189f33645e88d7a332d8d6b6ca4, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733077103753 2024-12-01T18:18:25,814 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting 510b47003fe9451e900f9c514c5e9d00, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1733077103779 2024-12-01T18:18:25,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/373ae11c663c475cae0a2c8463897de9 is 1080, key is row0068/info:/1733077105789/Put/seqid=0 
2024-12-01T18:18:25,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741845_1021 (size=27607) 2024-12-01T18:18:25,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741845_1021 (size=27607) 2024-12-01T18:18:25,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=108 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/373ae11c663c475cae0a2c8463897de9 2024-12-01T18:18:25,830 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b658ffcf569fa9b7017ef7c6129a394c#info#compaction#45 average throughput is 22.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-01T18:18:25,831 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/6ff3197a34204987bf311c2e692858df is 1080, key is row0001/info:/1733077091624/Put/seqid=0 2024-12-01T18:18:25,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/373ae11c663c475cae0a2c8463897de9 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/373ae11c663c475cae0a2c8463897de9 2024-12-01T18:18:25,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741846_1022 (size=77532) 2024-12-01T18:18:25,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741846_1022 (size=77532) 2024-12-01T18:18:25,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/373ae11c663c475cae0a2c8463897de9, entries=21, sequenceid=108, filesize=27.0 K 2024-12-01T18:18:25,840 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=8.41 KB/8608 for b658ffcf569fa9b7017ef7c6129a394c in 28ms, sequenceid=108, compaction requested=false 2024-12-01T18:18:25,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b658ffcf569fa9b7017ef7c6129a394c: 2024-12-01T18:18:25,841 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=112.2 K, sizeToCheck=16.0 K 2024-12-01T18:18:25,841 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:18:25,841 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/0a36ba5344334572b123dfd324537763 because midkey is the same as first or last row 2024-12-01T18:18:25,841 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/6ff3197a34204987bf311c2e692858df as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/6ff3197a34204987bf311c2e692858df 2024-12-01T18:18:25,847 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b658ffcf569fa9b7017ef7c6129a394c/info of b658ffcf569fa9b7017ef7c6129a394c into 6ff3197a34204987bf311c2e692858df(size=75.7 K), total size for store is 102.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-01T18:18:25,847 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b658ffcf569fa9b7017ef7c6129a394c: 2024-12-01T18:18:25,847 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c., storeName=b658ffcf569fa9b7017ef7c6129a394c/info, priority=13, startTime=1733077105811; duration=0sec 2024-12-01T18:18:25,847 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=102.7 K, sizeToCheck=16.0 K 2024-12-01T18:18:25,847 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-01T18:18:25,848 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:25,848 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:25,848 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b658ffcf569fa9b7017ef7c6129a394c:info 2024-12-01T18:18:25,849 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33181 {}] assignment.AssignmentManager(1346): Split request from b8365d49b74c,41383,1733077080285, parent={ENCODED => b658ffcf569fa9b7017ef7c6129a394c, NAME => 'TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-01T18:18:25,855 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33181 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:25,860 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33181 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, 
parent=b658ffcf569fa9b7017ef7c6129a394c, daughterA=571d6e6376c1f1f14824532620919ac4, daughterB=0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:25,861 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b658ffcf569fa9b7017ef7c6129a394c, daughterA=571d6e6376c1f1f14824532620919ac4, daughterB=0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:25,861 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b658ffcf569fa9b7017ef7c6129a394c, daughterA=571d6e6376c1f1f14824532620919ac4, daughterB=0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:25,862 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b658ffcf569fa9b7017ef7c6129a394c, daughterA=571d6e6376c1f1f14824532620919ac4, daughterB=0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:25,868 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b658ffcf569fa9b7017ef7c6129a394c, UNASSIGN}] 2024-12-01T18:18:25,868 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b658ffcf569fa9b7017ef7c6129a394c, UNASSIGN 2024-12-01T18:18:25,869 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=b658ffcf569fa9b7017ef7c6129a394c, regionState=CLOSING, regionLocation=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:25,871 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-01T18:18:25,871 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE; CloseRegionProcedure b658ffcf569fa9b7017ef7c6129a394c, server=b8365d49b74c,41383,1733077080285}] 2024-12-01T18:18:26,017 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T18:18:26,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,019 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,019 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,019 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,026 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41383,1733077080285 2024-12-01T18:18:26,027 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(124): Close b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:26,028 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-01T18:18:26,028 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1681): Closing b658ffcf569fa9b7017ef7c6129a394c, disabling compactions & flushes 2024-12-01T18:18:26,028 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 2024-12-01T18:18:26,028 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 2024-12-01T18:18:26,029 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. after waiting 0 ms 2024-12-01T18:18:26,029 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 2024-12-01T18:18:26,029 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(2837): Flushing b658ffcf569fa9b7017ef7c6129a394c 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-01T18:18:26,034 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/1edf9483c56c400992dfbf975dd84516 is 1080, key is row0089/info:/1733077105812/Put/seqid=0 2024-12-01T18:18:26,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,039 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,039 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741847_1023 (size=13586) 2024-12-01T18:18:26,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741847_1023 (size=13586) 2024-12-01T18:18:26,042 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/1edf9483c56c400992dfbf975dd84516 2024-12-01T18:18:26,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:26,048 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/.tmp/info/1edf9483c56c400992dfbf975dd84516 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/1edf9483c56c400992dfbf975dd84516 2024-12-01T18:18:26,054 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/1edf9483c56c400992dfbf975dd84516, entries=8, sequenceid=120, filesize=13.3 K 2024-12-01T18:18:26,056 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(3040): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for b658ffcf569fa9b7017ef7c6129a394c in 26ms, sequenceid=120, compaction requested=true 2024-12-01T18:18:26,057 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/a0ceaddb92a64fddba24446009ab99e7, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/4d7b8c279a3940e486e870493922ed21, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/0a36ba5344334572b123dfd324537763, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/605881721fb84e3c9b98578d5f0872ef, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/d3927189f33645e88d7a332d8d6b6ca4, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/510b47003fe9451e900f9c514c5e9d00] to archive 2024-12-01T18:18:26,057 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-01T18:18:26,059 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/a0ceaddb92a64fddba24446009ab99e7 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/a0ceaddb92a64fddba24446009ab99e7 2024-12-01T18:18:26,060 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/4d7b8c279a3940e486e870493922ed21 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/4d7b8c279a3940e486e870493922ed21 2024-12-01T18:18:26,061 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/0a36ba5344334572b123dfd324537763 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/0a36ba5344334572b123dfd324537763 2024-12-01T18:18:26,062 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/605881721fb84e3c9b98578d5f0872ef to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/605881721fb84e3c9b98578d5f0872ef 2024-12-01T18:18:26,063 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/d3927189f33645e88d7a332d8d6b6ca4 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/d3927189f33645e88d7a332d8d6b6ca4 2024-12-01T18:18:26,065 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/510b47003fe9451e900f9c514c5e9d00 to 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/510b47003fe9451e900f9c514c5e9d00 2024-12-01T18:18:26,069 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=1 2024-12-01T18:18:26,070 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. 2024-12-01T18:18:26,070 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1635): Region close journal for b658ffcf569fa9b7017ef7c6129a394c: 2024-12-01T18:18:26,072 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(170): Closed b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:26,072 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=b658ffcf569fa9b7017ef7c6129a394c, regionState=CLOSED 2024-12-01T18:18:26,076 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=13 2024-12-01T18:18:26,076 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=13, state=SUCCESS; CloseRegionProcedure b658ffcf569fa9b7017ef7c6129a394c, server=b8365d49b74c,41383,1733077080285 in 203 msec 2024-12-01T18:18:26,078 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-01T18:18:26,078 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b658ffcf569fa9b7017ef7c6129a394c, UNASSIGN in 208 msec 2024-12-01T18:18:26,099 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:26,101 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=12 splitting 3 storefiles, region=b658ffcf569fa9b7017ef7c6129a394c, threads=3 2024-12-01T18:18:26,101 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/1edf9483c56c400992dfbf975dd84516 for region: b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:26,101 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/373ae11c663c475cae0a2c8463897de9 for region: b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:26,102 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/6ff3197a34204987bf311c2e692858df for region: b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:26,113 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/373ae11c663c475cae0a2c8463897de9, top=true 2024-12-01T18:18:26,113 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/1edf9483c56c400992dfbf975dd84516, top=true 2024-12-01T18:18:26,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741848_1024 (size=27) 2024-12-01T18:18:26,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741848_1024 (size=27) 2024-12-01T18:18:26,128 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-373ae11c663c475cae0a2c8463897de9 for child: 0d069298e0443478925bd2f47d58cef2, parent: b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:26,128 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/373ae11c663c475cae0a2c8463897de9 for region: b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:26,128 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-1edf9483c56c400992dfbf975dd84516 for child: 0d069298e0443478925bd2f47d58cef2, parent: b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:26,128 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/1edf9483c56c400992dfbf975dd84516 for region: b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:26,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741849_1025 (size=27) 2024-12-01T18:18:26,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741849_1025 (size=27) 2024-12-01T18:18:26,137 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/6ff3197a34204987bf311c2e692858df for region: b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:18:26,137 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=12 split storefiles for region b658ffcf569fa9b7017ef7c6129a394c Daughter A: [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c] storefiles, Daughter B: [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-1edf9483c56c400992dfbf975dd84516, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-373ae11c663c475cae0a2c8463897de9, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c] storefiles. 2024-12-01T18:18:26,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741850_1026 (size=71) 2024-12-01T18:18:26,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741850_1026 (size=71) 2024-12-01T18:18:26,147 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:26,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741851_1027 (size=71) 2024-12-01T18:18:26,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741851_1027 (size=71) 2024-12-01T18:18:26,160 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:26,168 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=-1 2024-12-01T18:18:26,170 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=-1 2024-12-01T18:18:26,172 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733077106172"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733077106172"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733077106172"}]},"ts":"1733077106172"} 2024-12-01T18:18:26,173 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733077106172"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733077106172"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733077106172"}]},"ts":"1733077106172"} 2024-12-01T18:18:26,173 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733077106172"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733077106172"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733077106172"}]},"ts":"1733077106172"} 2024-12-01T18:18:26,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41383 {}] regionserver.HRegion(8581): Flush requested on 1588230740 2024-12-01T18:18:26,203 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-12-01T18:18:26,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=4.75 KB heapSize=8.29 KB 2024-12-01T18:18:26,207 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=571d6e6376c1f1f14824532620919ac4, ASSIGN}, {pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d069298e0443478925bd2f47d58cef2, ASSIGN}] 2024-12-01T18:18:26,208 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=571d6e6376c1f1f14824532620919ac4, ASSIGN 2024-12-01T18:18:26,208 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d069298e0443478925bd2f47d58cef2, ASSIGN 2024-12-01T18:18:26,209 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d069298e0443478925bd2f47d58cef2, ASSIGN; state=SPLITTING_NEW, location=b8365d49b74c,41383,1733077080285; forceNewPlan=false, retain=false 2024-12-01T18:18:26,209 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=571d6e6376c1f1f14824532620919ac4, ASSIGN; state=SPLITTING_NEW, 
location=b8365d49b74c,41383,1733077080285; forceNewPlan=false, retain=false 2024-12-01T18:18:26,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/.tmp/info/7d67f7ebaccd42ceabd253350e702399 is 193, key is TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2./info:regioninfo/1733077106172/Put/seqid=0 2024-12-01T18:18:26,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741852_1028 (size=9423) 2024-12-01T18:18:26,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741852_1028 (size=9423) 2024-12-01T18:18:26,226 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.54 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/.tmp/info/7d67f7ebaccd42ceabd253350e702399 2024-12-01T18:18:26,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/.tmp/table/ced0920eb0674d21b57558d31af360c8 is 65, key is TestLogRolling-testLogRolling/table:state/1733077081967/Put/seqid=0 2024-12-01T18:18:26,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741853_1029 (size=5412) 2024-12-01T18:18:26,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741853_1029 (size=5412) 2024-12-01T18:18:26,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=216 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/.tmp/table/ced0920eb0674d21b57558d31af360c8 2024-12-01T18:18:26,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/.tmp/info/7d67f7ebaccd42ceabd253350e702399 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/info/7d67f7ebaccd42ceabd253350e702399 2024-12-01T18:18:26,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/info/7d67f7ebaccd42ceabd253350e702399, entries=29, sequenceid=17, filesize=9.2 K 2024-12-01T18:18:26,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/.tmp/table/ced0920eb0674d21b57558d31af360c8 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/table/ced0920eb0674d21b57558d31af360c8 2024-12-01T18:18:26,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/table/ced0920eb0674d21b57558d31af360c8, entries=4, sequenceid=17, 
filesize=5.3 K 2024-12-01T18:18:26,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~4.75 KB/4869, heapSize ~8.01 KB/8200, currentSize=0 B/0 for 1588230740 in 67ms, sequenceid=17, compaction requested=false 2024-12-01T18:18:26,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-01T18:18:26,359 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=0d069298e0443478925bd2f47d58cef2, regionState=OPENING, regionLocation=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:26,359 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=571d6e6376c1f1f14824532620919ac4, regionState=OPENING, regionLocation=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:26,361 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; OpenRegionProcedure 0d069298e0443478925bd2f47d58cef2, server=b8365d49b74c,41383,1733077080285}] 2024-12-01T18:18:26,362 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=15, state=RUNNABLE; OpenRegionProcedure 571d6e6376c1f1f14824532620919ac4, server=b8365d49b74c,41383,1733077080285}] 2024-12-01T18:18:26,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b8365d49b74c,41383,1733077080285 2024-12-01T18:18:26,517 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4. 2024-12-01T18:18:26,518 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7285): Opening region: {ENCODED => 571d6e6376c1f1f14824532620919ac4, NAME => 'TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-01T18:18:26,518 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 571d6e6376c1f1f14824532620919ac4 2024-12-01T18:18:26,518 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:18:26,518 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7327): checking encryption for 571d6e6376c1f1f14824532620919ac4 2024-12-01T18:18:26,518 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7330): checking classloading for 571d6e6376c1f1f14824532620919ac4 2024-12-01T18:18:26,519 INFO [StoreOpener-571d6e6376c1f1f14824532620919ac4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 571d6e6376c1f1f14824532620919ac4 2024-12-01T18:18:26,520 INFO [StoreOpener-571d6e6376c1f1f14824532620919ac4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 571d6e6376c1f1f14824532620919ac4 columnFamilyName info 2024-12-01T18:18:26,520 DEBUG [StoreOpener-571d6e6376c1f1f14824532620919ac4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:26,531 DEBUG [StoreOpener-571d6e6376c1f1f14824532620919ac4-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c->hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/6ff3197a34204987bf311c2e692858df-bottom 2024-12-01T18:18:26,532 INFO [StoreOpener-571d6e6376c1f1f14824532620919ac4-1 {}] regionserver.HStore(327): Store=571d6e6376c1f1f14824532620919ac4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:18:26,533 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4 2024-12-01T18:18:26,534 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4 2024-12-01T18:18:26,536 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1085): writing seq id for 571d6e6376c1f1f14824532620919ac4 2024-12-01T18:18:26,536 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1102): Opened 571d6e6376c1f1f14824532620919ac4; next sequenceid=124; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827887, jitterRate=0.05271339416503906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:18:26,537 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1001): Region open journal for 571d6e6376c1f1f14824532620919ac4: 2024-12-01T18:18:26,538 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4., pid=18, masterSystemTime=1733077106514 2024-12-01T18:18:26,538 DEBUG 
[RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(403): Add compact mark for store 571d6e6376c1f1f14824532620919ac4:info, priority=-2147483648, current under compaction store size is 1 2024-12-01T18:18:26,538 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:26,538 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-01T18:18:26,539 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4. 2024-12-01T18:18:26,539 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1540): 571d6e6376c1f1f14824532620919ac4/info is initiating minor compaction (all files) 2024-12-01T18:18:26,539 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 571d6e6376c1f1f14824532620919ac4/info in TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4. 2024-12-01T18:18:26,539 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c->hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/6ff3197a34204987bf311c2e692858df-bottom] into tmpdir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/.tmp, totalSize=75.7 K 2024-12-01T18:18:26,539 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4. 2024-12-01T18:18:26,540 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4. 2024-12-01T18:18:26,540 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 
2024-12-01T18:18:26,540 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1733077091624 2024-12-01T18:18:26,540 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7285): Opening region: {ENCODED => 0d069298e0443478925bd2f47d58cef2, NAME => 'TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-01T18:18:26,540 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=571d6e6376c1f1f14824532620919ac4, regionState=OPEN, openSeqNum=124, regionLocation=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:26,540 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:26,540 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:18:26,540 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7327): checking encryption for 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:26,540 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7330): checking classloading for 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:26,542 INFO [StoreOpener-0d069298e0443478925bd2f47d58cef2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:26,543 INFO [StoreOpener-0d069298e0443478925bd2f47d58cef2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d069298e0443478925bd2f47d58cef2 columnFamilyName info 2024-12-01T18:18:26,543 DEBUG [StoreOpener-0d069298e0443478925bd2f47d58cef2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:18:26,544 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=15 2024-12-01T18:18:26,544 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=15, 
state=SUCCESS; OpenRegionProcedure 571d6e6376c1f1f14824532620919ac4, server=b8365d49b74c,41383,1733077080285 in 180 msec 2024-12-01T18:18:26,545 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=571d6e6376c1f1f14824532620919ac4, ASSIGN in 337 msec 2024-12-01T18:18:26,552 DEBUG [StoreOpener-0d069298e0443478925bd2f47d58cef2-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c->hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/6ff3197a34204987bf311c2e692858df-top 2024-12-01T18:18:26,557 DEBUG [StoreOpener-0d069298e0443478925bd2f47d58cef2-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-1edf9483c56c400992dfbf975dd84516 2024-12-01T18:18:26,561 DEBUG [StoreOpener-0d069298e0443478925bd2f47d58cef2-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-373ae11c663c475cae0a2c8463897de9 2024-12-01T18:18:26,561 INFO [StoreOpener-0d069298e0443478925bd2f47d58cef2-1 {}] regionserver.HStore(327): Store=0d069298e0443478925bd2f47d58cef2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:18:26,562 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:26,563 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:26,563 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 571d6e6376c1f1f14824532620919ac4#info#compaction#49 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-01T18:18:26,564 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/.tmp/info/fea033a9e18c453c92ac856542e7ed86 is 1080, key is row0001/info:/1733077091624/Put/seqid=0 2024-12-01T18:18:26,565 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1085): writing seq id for 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:26,566 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1102): Opened 0d069298e0443478925bd2f47d58cef2; next sequenceid=124; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=738562, jitterRate=-0.06087037920951843}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:18:26,566 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1001): Region open journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:26,567 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2., pid=17, masterSystemTime=1733077106514 2024-12-01T18:18:26,567 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(403): Add compact mark for store 0d069298e0443478925bd2f47d58cef2:info, priority=-2147483648, current under compaction store size is 2 2024-12-01T18:18:26,567 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:26,567 DEBUG [RS:0;b8365d49b74c:41383-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-01T18:18:26,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741854_1030 (size=70862) 2024-12-01T18:18:26,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741854_1030 (size=70862) 2024-12-01T18:18:26,569 INFO [RS:0;b8365d49b74c:41383-longCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 2024-12-01T18:18:26,569 DEBUG [RS:0;b8365d49b74c:41383-longCompactions-0 {}] regionserver.HStore(1540): 0d069298e0443478925bd2f47d58cef2/info is initiating minor compaction (all files) 2024-12-01T18:18:26,569 INFO [RS:0;b8365d49b74c:41383-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d069298e0443478925bd2f47d58cef2/info in TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 
2024-12-01T18:18:26,569 DEBUG [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 2024-12-01T18:18:26,569 INFO [RS:0;b8365d49b74c:41383-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c->hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/6ff3197a34204987bf311c2e692858df-top, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-373ae11c663c475cae0a2c8463897de9, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-1edf9483c56c400992dfbf975dd84516] into tmpdir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp, totalSize=115.9 K 2024-12-01T18:18:26,569 INFO [RS_OPEN_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 
2024-12-01T18:18:26,570 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=0d069298e0443478925bd2f47d58cef2, regionState=OPEN, openSeqNum=124, regionLocation=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:26,571 DEBUG [RS:0;b8365d49b74c:41383-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1733077091624 2024-12-01T18:18:26,571 DEBUG [RS:0;b8365d49b74c:41383-longCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-373ae11c663c475cae0a2c8463897de9, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=108, earliestPutTs=1733077105789 2024-12-01T18:18:26,571 DEBUG [RS:0;b8365d49b74c:41383-longCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-1edf9483c56c400992dfbf975dd84516, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733077105812 2024-12-01T18:18:26,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-01T18:18:26,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; OpenRegionProcedure 0d069298e0443478925bd2f47d58cef2, server=b8365d49b74c,41383,1733077080285 in 210 msec 2024-12-01T18:18:26,575 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=12 2024-12-01T18:18:26,575 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d069298e0443478925bd2f47d58cef2, ASSIGN in 366 msec 2024-12-01T18:18:26,576 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/.tmp/info/fea033a9e18c453c92ac856542e7ed86 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/info/fea033a9e18c453c92ac856542e7ed86 2024-12-01T18:18:26,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b658ffcf569fa9b7017ef7c6129a394c, daughterA=571d6e6376c1f1f14824532620919ac4, daughterB=0d069298e0443478925bd2f47d58cef2 in 720 msec 2024-12-01T18:18:26,583 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 1 (all) file(s) in 571d6e6376c1f1f14824532620919ac4/info of 571d6e6376c1f1f14824532620919ac4 into fea033a9e18c453c92ac856542e7ed86(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
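[Editor's note] The throughput-controller lines in this section (e.g. "average throughput is 17.96 MB/second, slept 0 time(s) ... total limit is 50.00 MB/second") record compaction writes being rate-limited to 50 MB/s. A minimal, hypothetical sketch of that kind of bandwidth throttle is shown below; it is illustrative only, not the HBase PressureAwareThroughputController, and the class name SimpleThroughputThrottle and the 1 MB chunk size are made up — only the 50 MB/s figure comes from the log.

/**
 * Illustrative bandwidth throttle: callers report bytes written via control(),
 * and the throttle sleeps just long enough to keep the average rate at or
 * below maxBytesPerSec. Hypothetical sketch, not HBase code.
 */
public final class SimpleThroughputThrottle {
    private final double maxBytesPerSec;
    private final long startNanos = System.nanoTime();
    private long bytesSoFar;

    public SimpleThroughputThrottle(double maxBytesPerSec) {
        this.maxBytesPerSec = maxBytesPerSec;
    }

    /** Record bytes written; sleep if the average rate exceeds the limit. */
    public synchronized void control(long bytesWritten) throws InterruptedException {
        bytesSoFar += bytesWritten;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minElapsedSec = bytesSoFar / maxBytesPerSec; // time required at the cap
        if (minElapsedSec > elapsedSec) {
            Thread.sleep((long) ((minElapsedSec - elapsedSec) * 1000));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/s, matching the "total limit is 50.00 MB/second" lines in this log.
        SimpleThroughputThrottle throttle = new SimpleThroughputThrottle(50.0 * 1024 * 1024);
        byte[] chunk = new byte[1 << 20]; // pretend each iteration writes 1 MB of compaction output
        for (int i = 0; i < 10; i++) {
            throttle.control(chunk.length);
        }
    }
}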
2024-12-01T18:18:26,583 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 571d6e6376c1f1f14824532620919ac4: 2024-12-01T18:18:26,583 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4., storeName=571d6e6376c1f1f14824532620919ac4/info, priority=15, startTime=1733077106538; duration=0sec 2024-12-01T18:18:26,583 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:26,583 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 571d6e6376c1f1f14824532620919ac4:info 2024-12-01T18:18:26,602 INFO [RS:0;b8365d49b74c:41383-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d069298e0443478925bd2f47d58cef2#info#compaction#50 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-01T18:18:26,602 DEBUG [RS:0;b8365d49b74c:41383-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/396fb8eb69b44942af36b57ecb934481 is 1080, key is row0062/info:/1733077103780/Put/seqid=0 2024-12-01T18:18:26,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741855_1031 (size=42984) 2024-12-01T18:18:26,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741855_1031 (size=42984) 2024-12-01T18:18:26,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:26,613 DEBUG [RS:0;b8365d49b74c:41383-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/396fb8eb69b44942af36b57ecb934481 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/396fb8eb69b44942af36b57ecb934481 2024-12-01T18:18:26,619 INFO [RS:0;b8365d49b74c:41383-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d069298e0443478925bd2f47d58cef2/info of 0d069298e0443478925bd2f47d58cef2 into 396fb8eb69b44942af36b57ecb934481(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-01T18:18:26,619 DEBUG [RS:0;b8365d49b74c:41383-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:26,619 INFO [RS:0;b8365d49b74c:41383-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2., storeName=0d069298e0443478925bd2f47d58cef2/info, priority=13, startTime=1733077106567; duration=0sec 2024-12-01T18:18:26,619 DEBUG [RS:0;b8365d49b74c:41383-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:26,619 DEBUG [RS:0;b8365d49b74c:41383-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d069298e0443478925bd2f47d58cef2:info 2024-12-01T18:18:27,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:27,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:51118 deadline: 1733077117832, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733077081614.b658ffcf569fa9b7017ef7c6129a394c. is not online on b8365d49b74c,41383,1733077080285 2024-12-01T18:18:28,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:29,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:30,220 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T18:18:30,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:31,571 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T18:18:31,573 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,573 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,573 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,573 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,573 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,594 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,594 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:18:31,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:32,516 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:18:32,517 INFO [RS-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36596, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:18:32,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:33,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:34,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:35,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:36,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:37,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:37,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:37,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:18:37,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/cd7a3337a35343338dd181be4e0ba657 is 1080, key is row0097/info:/1733077117887/Put/seqid=0 2024-12-01T18:18:37,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741856_1032 (size=12515) 2024-12-01T18:18:37,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741856_1032 (size=12515) 2024-12-01T18:18:37,904 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/cd7a3337a35343338dd181be4e0ba657 2024-12-01T18:18:37,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/cd7a3337a35343338dd181be4e0ba657 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/cd7a3337a35343338dd181be4e0ba657 2024-12-01T18:18:37,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/cd7a3337a35343338dd181be4e0ba657, entries=7, sequenceid=134, filesize=12.2 K 2024-12-01T18:18:37,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 0d069298e0443478925bd2f47d58cef2 in 23ms, sequenceid=134, compaction requested=false 2024-12-01T18:18:37,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:37,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:37,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-12-01T18:18:37,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/afeb2743e2e54c79b87dc76ad54102a6 is 1080, key is row0104/info:/1733077117895/Put/seqid=0 2024-12-01T18:18:37,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to 
blk_1073741857_1033 (size=27628) 2024-12-01T18:18:37,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741857_1033 (size=27628) 2024-12-01T18:18:37,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/afeb2743e2e54c79b87dc76ad54102a6 2024-12-01T18:18:37,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/afeb2743e2e54c79b87dc76ad54102a6 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/afeb2743e2e54c79b87dc76ad54102a6 2024-12-01T18:18:37,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/afeb2743e2e54c79b87dc76ad54102a6, entries=21, sequenceid=158, filesize=27.0 K 2024-12-01T18:18:37,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=4.20 KB/4304 for 0d069298e0443478925bd2f47d58cef2 in 27ms, sequenceid=158, compaction requested=true 2024-12-01T18:18:37,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:37,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d069298e0443478925bd2f47d58cef2:info, priority=-2147483648, current under compaction store size is 1 2024-12-01T18:18:37,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:37,946 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-01T18:18:37,947 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 83127 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-01T18:18:37,947 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1540): 0d069298e0443478925bd2f47d58cef2/info is initiating minor compaction (all files) 2024-12-01T18:18:37,947 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d069298e0443478925bd2f47d58cef2/info in TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 
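[Editor's note] The recurring Close-WAL-Writer warnings above come from RecoverLeaseFSUtils: after asking the NameNode to recover the lease on an old meta WAL, it polls DistributedFileSystem.isFileClosed (via reflection, as the Method.invoke frames show), but the underlying DFSClient has already been closed, so every poll fails with "Filesystem closed" and the loop retries about once a second. A rough, hypothetical sketch of that recover-then-poll pattern is below; the path, timeout, and class name are invented, and the real HBase utility adds reflection and its own backoff, so this is only an illustration of the shape of the loop.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Illustrative lease-recovery loop; hypothetical sketch, not RecoverLeaseFSUtils. */
public final class LeaseRecoverySketch {
    public static boolean recover(DistributedFileSystem dfs, Path wal, long timeoutMs)
            throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        // Ask the NameNode to start lease recovery; true means the file is already closed.
        if (dfs.recoverLease(wal)) {
            return true;
        }
        while (System.currentTimeMillis() < deadline) {
            try {
                if (dfs.isFileClosed(wal)) {      // the call that fails in the warnings above
                    return true;
                }
            } catch (java.io.IOException e) {
                // "Filesystem closed" lands here once the DFSClient is gone;
                // the real code logs a WARN like the ones in this section and keeps retrying.
            }
            Thread.sleep(1000);                    // retry roughly once per second
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical WAL path, standing in for the one named in the warnings.
        Path wal = new Path("hdfs://localhost:8020/hbase/WALs/example.meta");
        try (FileSystem fs = FileSystem.get(wal.toUri(), conf)) {
            if (fs instanceof DistributedFileSystem) {
                System.out.println("closed: " + recover((DistributedFileSystem) fs, wal, 60_000L));
            }
        }
    }
}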
2024-12-01T18:18:37,947 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/396fb8eb69b44942af36b57ecb934481, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/cd7a3337a35343338dd181be4e0ba657, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/afeb2743e2e54c79b87dc76ad54102a6] into tmpdir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp, totalSize=81.2 K 2024-12-01T18:18:37,947 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting 396fb8eb69b44942af36b57ecb934481, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733077103780 2024-12-01T18:18:37,947 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd7a3337a35343338dd181be4e0ba657, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733077117887 2024-12-01T18:18:37,948 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting afeb2743e2e54c79b87dc76ad54102a6, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733077117895 2024-12-01T18:18:37,959 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d069298e0443478925bd2f47d58cef2#info#compaction#53 average throughput is 32.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-01T18:18:37,959 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/e45695a84add48e38c755c5c0911718d is 1080, key is row0062/info:/1733077103780/Put/seqid=0 2024-12-01T18:18:37,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741858_1034 (size=73410) 2024-12-01T18:18:37,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741858_1034 (size=73410) 2024-12-01T18:18:37,969 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/e45695a84add48e38c755c5c0911718d as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/e45695a84add48e38c755c5c0911718d 2024-12-01T18:18:37,975 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d069298e0443478925bd2f47d58cef2/info of 0d069298e0443478925bd2f47d58cef2 into e45695a84add48e38c755c5c0911718d(size=71.7 K), total size for store is 71.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-01T18:18:37,975 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:37,975 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2., storeName=0d069298e0443478925bd2f47d58cef2/info, priority=13, startTime=1733077117946; duration=0sec 2024-12-01T18:18:37,975 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:37,975 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d069298e0443478925bd2f47d58cef2:info 2024-12-01T18:18:38,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:39,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:39,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:39,926 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:18:39,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/ce3f7c37382a4ea8847d26e7ac182763 is 1080, key is row0125/info:/1733077117918/Put/seqid=0 2024-12-01T18:18:39,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741859_1035 (size=12516) 2024-12-01T18:18:39,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741859_1035 (size=12516) 2024-12-01T18:18:39,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/ce3f7c37382a4ea8847d26e7ac182763 2024-12-01T18:18:39,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/ce3f7c37382a4ea8847d26e7ac182763 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ce3f7c37382a4ea8847d26e7ac182763 2024-12-01T18:18:39,948 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ce3f7c37382a4ea8847d26e7ac182763, entries=7, sequenceid=169, filesize=12.2 K 2024-12-01T18:18:39,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 0d069298e0443478925bd2f47d58cef2 in 22ms, sequenceid=169, compaction requested=false 2024-12-01T18:18:39,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:39,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:39,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-01T18:18:39,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/3b1dc617f7924078aef82ef19131e8a5 is 1080, key is row0132/info:/1733077119927/Put/seqid=0 2024-12-01T18:18:39,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to 
blk_1073741860_1036 (size=29784) 2024-12-01T18:18:39,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741860_1036 (size=29784) 2024-12-01T18:18:39,960 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/3b1dc617f7924078aef82ef19131e8a5 2024-12-01T18:18:39,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/3b1dc617f7924078aef82ef19131e8a5 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/3b1dc617f7924078aef82ef19131e8a5 2024-12-01T18:18:39,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/3b1dc617f7924078aef82ef19131e8a5, entries=23, sequenceid=195, filesize=29.1 K 2024-12-01T18:18:39,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=6.30 KB/6456 for 0d069298e0443478925bd2f47d58cef2 in 20ms, sequenceid=195, compaction requested=true 2024-12-01T18:18:39,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:39,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d069298e0443478925bd2f47d58cef2:info, priority=-2147483648, current under compaction store size is 1 2024-12-01T18:18:39,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:39,971 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-01T18:18:39,972 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 115710 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-01T18:18:39,972 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1540): 0d069298e0443478925bd2f47d58cef2/info is initiating minor compaction (all files) 2024-12-01T18:18:39,972 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d069298e0443478925bd2f47d58cef2/info in TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 
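[Editor's note] The flush entries above follow the usual two-step pattern: the flusher writes the new HFile under the region's .tmp directory, then commits it by moving it into the store directory (the HRegionFileSystem "Committing .../.tmp/info/<file> as .../info/<file>" lines). The bare-bones sketch below shows that write-then-rename idea with plain Hadoop FileSystem calls; the paths and class name are made up, and the real code also validates the HFile and updates store metadata, so treat it as an illustration of the pattern only.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative "write to .tmp, then rename into place" commit; hypothetical sketch, not HBase code. */
public final class TmpCommitSketch {
    public static Path writeAndCommit(FileSystem fs, Path storeDir, String fileName, byte[] payload)
            throws java.io.IOException {
        Path tmpFile = new Path(new Path(storeDir.getParent(), ".tmp"), fileName);
        Path finalFile = new Path(storeDir, fileName);

        // 1. Write the whole file under .tmp so readers never see a partially written file.
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write(payload);
        }
        // 2. Move it into the store directory in one rename.
        if (!fs.rename(tmpFile, finalFile)) {
            throw new java.io.IOException("commit failed for " + tmpFile);
        }
        return finalFile;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical local paths standing in for a region's info store.
        Path storeDir = new Path("/tmp/region-demo/info");
        try (FileSystem fs = FileSystem.getLocal(conf)) {
            fs.mkdirs(storeDir);
            fs.mkdirs(new Path(storeDir.getParent(), ".tmp"));
            Path committed = writeAndCommit(fs, storeDir, "example-hfile",
                "not a real HFile".getBytes(StandardCharsets.UTF_8));
            System.out.println("committed to " + committed);
        }
    }
}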
2024-12-01T18:18:39,972 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/e45695a84add48e38c755c5c0911718d, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ce3f7c37382a4ea8847d26e7ac182763, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/3b1dc617f7924078aef82ef19131e8a5] into tmpdir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp, totalSize=113.0 K 2024-12-01T18:18:39,972 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting e45695a84add48e38c755c5c0911718d, keycount=63, bloomtype=ROW, size=71.7 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733077103780 2024-12-01T18:18:39,973 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce3f7c37382a4ea8847d26e7ac182763, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733077117918 2024-12-01T18:18:39,973 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b1dc617f7924078aef82ef19131e8a5, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733077119927 2024-12-01T18:18:39,985 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d069298e0443478925bd2f47d58cef2#info#compaction#56 average throughput is 31.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-01T18:18:39,986 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/e0e505465b5b4a7980fce95342d9033e is 1080, key is row0062/info:/1733077103780/Put/seqid=0 2024-12-01T18:18:39,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741861_1037 (size=105860) 2024-12-01T18:18:39,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741861_1037 (size=105860) 2024-12-01T18:18:39,994 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/e0e505465b5b4a7980fce95342d9033e as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/e0e505465b5b4a7980fce95342d9033e 2024-12-01T18:18:39,999 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d069298e0443478925bd2f47d58cef2/info of 0d069298e0443478925bd2f47d58cef2 into e0e505465b5b4a7980fce95342d9033e(size=103.4 K), total size for store is 103.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-01T18:18:39,999 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:39,999 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2., storeName=0d069298e0443478925bd2f47d58cef2/info, priority=13, startTime=1733077119970; duration=0sec 2024-12-01T18:18:40,000 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:40,000 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d069298e0443478925bd2f47d58cef2:info 2024-12-01T18:18:40,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:41,528 INFO [master/b8365d49b74c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-01T18:18:41,528 INFO [master/b8365d49b74c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-01T18:18:41,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:41,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:41,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:18:41,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/f347a07ff22e42e1a87a959bf6bc745d is 1080, key is row0155/info:/1733077119950/Put/seqid=0 2024-12-01T18:18:41,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741862_1038 (size=12516) 2024-12-01T18:18:41,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741862_1038 (size=12516) 2024-12-01T18:18:41,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/f347a07ff22e42e1a87a959bf6bc745d 2024-12-01T18:18:41,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/f347a07ff22e42e1a87a959bf6bc745d as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/f347a07ff22e42e1a87a959bf6bc745d 2024-12-01T18:18:41,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/f347a07ff22e42e1a87a959bf6bc745d, entries=7, sequenceid=206, filesize=12.2 K 2024-12-01T18:18:41,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 0d069298e0443478925bd2f47d58cef2 in 22ms, sequenceid=206, compaction requested=false 2024-12-01T18:18:41,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:41,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-12-01T18:18:41,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/ee0bab7e535246408b43c23d784556ea is 1080, key is row0162/info:/1733077121958/Put/seqid=0 2024-12-01T18:18:41,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0d069298e0443478925bd2f47d58cef2, server=b8365d49b74c,41383,1733077080285 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-01T18:18:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:51118 deadline: 1733077131990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0d069298e0443478925bd2f47d58cef2, server=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:41,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741863_1039 (size=27628) 2024-12-01T18:18:41,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741863_1039 (size=27628) 2024-12-01T18:18:41,999 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/ee0bab7e535246408b43c23d784556ea 2024-12-01T18:18:42,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/ee0bab7e535246408b43c23d784556ea as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ee0bab7e535246408b43c23d784556ea 2024-12-01T18:18:42,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ee0bab7e535246408b43c23d784556ea, entries=21, sequenceid=230, filesize=27.0 K 2024-12-01T18:18:42,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=8.41 KB/8608 for 0d069298e0443478925bd2f47d58cef2 in 30ms, sequenceid=230, compaction requested=true 2024-12-01T18:18:42,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:42,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d069298e0443478925bd2f47d58cef2:info, priority=-2147483648, current under compaction store size is 1 2024-12-01T18:18:42,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:42,010 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-01T18:18:42,011 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 146004 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-01T18:18:42,012 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1540): 0d069298e0443478925bd2f47d58cef2/info is initiating minor compaction (all files) 2024-12-01T18:18:42,012 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d069298e0443478925bd2f47d58cef2/info in TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 
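Each third flush above is followed by a compaction request, and the policy reports "3 eligible, 16 blocking" before merging all three HFiles into one. A sketch of the selection knobs that plausibly produce that behaviour, shown with their usual defaults rather than values read from this test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionKnobs {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // A store becomes eligible for minor compaction once it has this many HFiles,
            // matching the pattern above of a compaction request after every third flush.
            conf.setInt("hbase.hstore.compaction.min", 3);
            // Upper bound on the number of files merged in one minor compaction.
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Flushes stall once a store accumulates this many files; this is the
            // "16 blocking" figure printed by SortedCompactionPolicy above.
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            // ExploringCompactionPolicy only groups files whose sizes stay within this ratio.
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

            System.out.println("minor compaction once "
                + conf.getInt("hbase.hstore.compaction.min", 3) + " store files are present");
        }
    }
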
2024-12-01T18:18:42,012 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/e0e505465b5b4a7980fce95342d9033e, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/f347a07ff22e42e1a87a959bf6bc745d, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ee0bab7e535246408b43c23d784556ea] into tmpdir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp, totalSize=142.6 K 2024-12-01T18:18:42,012 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0e505465b5b4a7980fce95342d9033e, keycount=93, bloomtype=ROW, size=103.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733077103780 2024-12-01T18:18:42,012 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting f347a07ff22e42e1a87a959bf6bc745d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733077119950 2024-12-01T18:18:42,013 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee0bab7e535246408b43c23d784556ea, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733077121958 2024-12-01T18:18:42,024 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d069298e0443478925bd2f47d58cef2#info#compaction#59 average throughput is 41.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-01T18:18:42,025 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/8ce60136ece94fceb3a394c45e3d2fb0 is 1080, key is row0062/info:/1733077103780/Put/seqid=0 2024-12-01T18:18:42,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741864_1040 (size=136278) 2024-12-01T18:18:42,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741864_1040 (size=136278) 2024-12-01T18:18:42,035 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/8ce60136ece94fceb3a394c45e3d2fb0 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/8ce60136ece94fceb3a394c45e3d2fb0 2024-12-01T18:18:42,041 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d069298e0443478925bd2f47d58cef2/info of 0d069298e0443478925bd2f47d58cef2 into 8ce60136ece94fceb3a394c45e3d2fb0(size=133.1 K), total size for store is 133.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-01T18:18:42,041 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:42,041 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2., storeName=0d069298e0443478925bd2f47d58cef2/info, priority=13, startTime=1733077122010; duration=0sec 2024-12-01T18:18:42,041 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:42,041 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d069298e0443478925bd2f47d58cef2:info 2024-12-01T18:18:42,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:43,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:44,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:45,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:46,054 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 14835 2024-12-01T18:18:46,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:47,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:48,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:49,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:50,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:51,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:52,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:52,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-01T18:18:52,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/2ec6d033ebcd4b86b49d353e41a7a018 is 1080, key is row0183/info:/1733077121981/Put/seqid=0 2024-12-01T18:18:52,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741865_1041 (size=14672) 2024-12-01T18:18:52,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741865_1041 (size=14672) 2024-12-01T18:18:52,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/2ec6d033ebcd4b86b49d353e41a7a018 2024-12-01T18:18:52,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/2ec6d033ebcd4b86b49d353e41a7a018 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/2ec6d033ebcd4b86b49d353e41a7a018 2024-12-01T18:18:52,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/2ec6d033ebcd4b86b49d353e41a7a018, entries=9, sequenceid=243, filesize=14.3 K 2024-12-01T18:18:52,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=1.05 KB/1076 for 0d069298e0443478925bd2f47d58cef2 in 22ms, sequenceid=243, compaction requested=false 2024-12-01T18:18:52,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:52,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:53,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:54,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:18:54,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:18:54,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/eff185343b194fe78d39758252b1662b is 1080, key is row0192/info:/1733077132073/Put/seqid=0 2024-12-01T18:18:54,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741866_1042 (size=12516) 2024-12-01T18:18:54,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741866_1042 (size=12516) 2024-12-01T18:18:54,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/eff185343b194fe78d39758252b1662b 2024-12-01T18:18:54,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0d069298e0443478925bd2f47d58cef2, server=b8365d49b74c,41383,1733077080285 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-01T18:18:54,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:51118 deadline: 1733077144102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0d069298e0443478925bd2f47d58cef2, server=b8365d49b74c,41383,1733077080285 2024-12-01T18:18:54,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/eff185343b194fe78d39758252b1662b as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/eff185343b194fe78d39758252b1662b 2024-12-01T18:18:54,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/eff185343b194fe78d39758252b1662b, entries=7, sequenceid=253, filesize=12.2 K 2024-12-01T18:18:54,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 0d069298e0443478925bd2f47d58cef2 in 33ms, sequenceid=253, compaction requested=true 2024-12-01T18:18:54,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:54,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d069298e0443478925bd2f47d58cef2:info, priority=-2147483648, current under compaction store size is 1 2024-12-01T18:18:54,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:54,113 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-01T18:18:54,114 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 163466 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-01T18:18:54,114 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1540): 0d069298e0443478925bd2f47d58cef2/info is initiating minor compaction (all files) 2024-12-01T18:18:54,114 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d069298e0443478925bd2f47d58cef2/info in TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 
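The Mutate calls rejected above with RegionTooBusyException are normally retried by the HBase client on its own; the sketch below only makes the backoff visible from the caller's side, reusing the table, family, and row names that appear in this log and otherwise assumed connection settings:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
                Put put = new Put(Bytes.toBytes("row0192"))
                    .addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        break; // accepted once the memstore has been flushed below the limit
                    } catch (RegionTooBusyException busy) {
                        // Region is over its memstore limit (32 KB in this test); wait for the
                        // flush logged above to complete, then try again.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }
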
2024-12-01T18:18:54,114 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/8ce60136ece94fceb3a394c45e3d2fb0, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/2ec6d033ebcd4b86b49d353e41a7a018, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/eff185343b194fe78d39758252b1662b] into tmpdir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp, totalSize=159.6 K 2024-12-01T18:18:54,114 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ce60136ece94fceb3a394c45e3d2fb0, keycount=121, bloomtype=ROW, size=133.1 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733077103780 2024-12-01T18:18:54,115 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ec6d033ebcd4b86b49d353e41a7a018, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733077121981 2024-12-01T18:18:54,115 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting eff185343b194fe78d39758252b1662b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733077132073 2024-12-01T18:18:54,127 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d069298e0443478925bd2f47d58cef2#info#compaction#62 average throughput is 46.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-01T18:18:54,129 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/8b41f6e85e4246518fa91f83bc2111cb is 1080, key is row0062/info:/1733077103780/Put/seqid=0 2024-12-01T18:18:54,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741867_1043 (size=153701) 2024-12-01T18:18:54,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741867_1043 (size=153701) 2024-12-01T18:18:54,138 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/8b41f6e85e4246518fa91f83bc2111cb as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/8b41f6e85e4246518fa91f83bc2111cb 2024-12-01T18:18:54,167 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d069298e0443478925bd2f47d58cef2/info of 0d069298e0443478925bd2f47d58cef2 into 8b41f6e85e4246518fa91f83bc2111cb(size=150.1 K), total size for store is 150.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-01T18:18:54,167 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:18:54,167 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2., storeName=0d069298e0443478925bd2f47d58cef2/info, priority=13, startTime=1733077134113; duration=0sec 2024-12-01T18:18:54,167 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:18:54,167 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d069298e0443478925bd2f47d58cef2:info 2024-12-01T18:18:54,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:55,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:18:56,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:57,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:58,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:18:59,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:19:00,220 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T18:19:00,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:19:01,309 DEBUG [master/b8365d49b74c:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region ef4cc29d327f6d110395be9a9161ded7 changed from -1.0 to 0.0, refreshing cache 2024-12-01T18:19:01,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:19:02,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:19:03,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:19:04,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:19:04,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-01T18:19:04,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/d298da5fed6047ad8c39571d7bb19110 is 1080, key is row0199/info:/1733077134081/Put/seqid=0 2024-12-01T18:19:04,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741868_1044 (size=29806) 2024-12-01T18:19:04,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741868_1044 (size=29806) 2024-12-01T18:19:04,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/d298da5fed6047ad8c39571d7bb19110 2024-12-01T18:19:04,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/d298da5fed6047ad8c39571d7bb19110 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/d298da5fed6047ad8c39571d7bb19110 2024-12-01T18:19:04,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/d298da5fed6047ad8c39571d7bb19110, entries=23, sequenceid=280, filesize=29.1 K 2024-12-01T18:19:04,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for 0d069298e0443478925bd2f47d58cef2 in 24ms, sequenceid=280, compaction requested=false 2024-12-01T18:19:04,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:19:04,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:19:05,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:19:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:19:06,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-01T18:19:06,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/04f8596ece124cee83deef307f8b6392 is 1080, key is row0222/info:/1733077144182/Put/seqid=0 2024-12-01T18:19:06,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741869_1045 (size=12523) 2024-12-01T18:19:06,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741869_1045 (size=12523) 2024-12-01T18:19:06,200 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/04f8596ece124cee83deef307f8b6392 2024-12-01T18:19:06,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/04f8596ece124cee83deef307f8b6392 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/04f8596ece124cee83deef307f8b6392 2024-12-01T18:19:06,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/04f8596ece124cee83deef307f8b6392, entries=7, sequenceid=290, filesize=12.2 K 2024-12-01T18:19:06,212 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 0d069298e0443478925bd2f47d58cef2 in 22ms, sequenceid=290, compaction requested=true 2024-12-01T18:19:06,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:19:06,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41383 {}] regionserver.HRegion(8581): Flush requested on 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:19:06,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d069298e0443478925bd2f47d58cef2:info, priority=-2147483648, current under compaction store size is 1 2024-12-01T18:19:06,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:19:06,213 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-01T18:19:06,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-01T18:19:06,214 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 196030 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-01T18:19:06,214 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1540): 0d069298e0443478925bd2f47d58cef2/info is initiating minor compaction (all files) 2024-12-01T18:19:06,214 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d069298e0443478925bd2f47d58cef2/info in TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 2024-12-01T18:19:06,214 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/8b41f6e85e4246518fa91f83bc2111cb, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/d298da5fed6047ad8c39571d7bb19110, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/04f8596ece124cee83deef307f8b6392] into tmpdir=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp, totalSize=191.4 K 2024-12-01T18:19:06,215 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b41f6e85e4246518fa91f83bc2111cb, keycount=137, bloomtype=ROW, size=150.1 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733077103780 2024-12-01T18:19:06,215 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting d298da5fed6047ad8c39571d7bb19110, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733077134081 2024-12-01T18:19:06,216 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04f8596ece124cee83deef307f8b6392, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733077144182 2024-12-01T18:19:06,217 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/1c70af89b7294e8ea41bd3c88cb87001 is 1080, key is row0229/info:/1733077146190/Put/seqid=0 2024-12-01T18:19:06,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741870_1046 (size=29807) 2024-12-01T18:19:06,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741870_1046 (size=29807) 2024-12-01T18:19:06,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/1c70af89b7294e8ea41bd3c88cb87001 2024-12-01T18:19:06,229 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d069298e0443478925bd2f47d58cef2#info#compaction#66 average throughput is 57.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-01T18:19:06,230 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/95455b54477e4193b0dfb55632475cc2 is 1080, key is row0062/info:/1733077103780/Put/seqid=0 2024-12-01T18:19:06,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/1c70af89b7294e8ea41bd3c88cb87001 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/1c70af89b7294e8ea41bd3c88cb87001 2024-12-01T18:19:06,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741871_1047 (size=186180) 2024-12-01T18:19:06,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741871_1047 (size=186180) 2024-12-01T18:19:06,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/1c70af89b7294e8ea41bd3c88cb87001, entries=23, sequenceid=316, filesize=29.1 K 2024-12-01T18:19:06,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=5.25 KB/5380 for 0d069298e0443478925bd2f47d58cef2 in 24ms, sequenceid=316, compaction requested=false 2024-12-01T18:19:06,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:19:06,238 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/95455b54477e4193b0dfb55632475cc2 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/95455b54477e4193b0dfb55632475cc2 2024-12-01T18:19:06,244 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d069298e0443478925bd2f47d58cef2/info of 0d069298e0443478925bd2f47d58cef2 into 95455b54477e4193b0dfb55632475cc2(size=181.8 K), total size for store is 210.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-01T18:19:06,244 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:19:06,244 INFO [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2., storeName=0d069298e0443478925bd2f47d58cef2/info, priority=13, startTime=1733077146212; duration=0sec 2024-12-01T18:19:06,244 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-01T18:19:06,244 DEBUG [RS:0;b8365d49b74c:41383-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d069298e0443478925bd2f47d58cef2:info 2024-12-01T18:19:06,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:19:07,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:19:08,220 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-01T18:19:08,220 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41383%2C1733077080285.1733077148220 2024-12-01T18:19:08,227 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285/b8365d49b74c%2C41383%2C1733077080285.1733077080663 with entries=308, filesize=306.54 KB; new WAL /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285/b8365d49b74c%2C41383%2C1733077080285.1733077148220 2024-12-01T18:19:08,228 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38401:38401),(127.0.0.1/127.0.0.1:46197:46197)] 2024-12-01T18:19:08,228 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285/b8365d49b74c%2C41383%2C1733077080285.1733077080663 is not closed yet, will try archiving it next time 2024-12-01T18:19:08,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741833_1009 (size=313906) 2024-12-01T18:19:08,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741833_1009 (size=313906) 2024-12-01T18:19:08,231 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 0d069298e0443478925bd2f47d58cef2 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-12-01T18:19:08,235 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/d5976f003f2e4664893397a43f0e8804 is 1080, key is row0252/info:/1733077146213/Put/seqid=0 2024-12-01T18:19:08,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741873_1049 (size=10357) 2024-12-01T18:19:08,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741873_1049 (size=10357) 2024-12-01T18:19:08,242 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/d5976f003f2e4664893397a43f0e8804 2024-12-01T18:19:08,247 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/.tmp/info/d5976f003f2e4664893397a43f0e8804 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/d5976f003f2e4664893397a43f0e8804 2024-12-01T18:19:08,251 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/d5976f003f2e4664893397a43f0e8804, entries=5, sequenceid=325, filesize=10.1 K 2024-12-01T18:19:08,252 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 0d069298e0443478925bd2f47d58cef2 in 21ms, sequenceid=325, compaction requested=true 2024-12-01T18:19:08,252 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:19:08,252 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.21 KB heapSize=4.13 KB 2024-12-01T18:19:08,256 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/.tmp/info/3297b303f5da46b3a0f4734bf8225af4 is 193, key is TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2./info:regioninfo/1733077106570/Put/seqid=0 2024-12-01T18:19:08,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741874_1050 (size=7803) 2024-12-01T18:19:08,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741874_1050 (size=7803) 2024-12-01T18:19:08,264 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.21 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/.tmp/info/3297b303f5da46b3a0f4734bf8225af4 2024-12-01T18:19:08,268 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/.tmp/info/3297b303f5da46b3a0f4734bf8225af4 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/info/3297b303f5da46b3a0f4734bf8225af4 2024-12-01T18:19:08,273 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/info/3297b303f5da46b3a0f4734bf8225af4, entries=16, sequenceid=24, filesize=7.6 K 2024-12-01T18:19:08,274 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~2.21 KB/2260, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 22ms, sequenceid=24, compaction requested=false 2024-12-01T18:19:08,274 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-01T18:19:08,274 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing ef4cc29d327f6d110395be9a9161ded7 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-01T18:19:08,290 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/namespace/ef4cc29d327f6d110395be9a9161ded7/.tmp/info/9692871f7d6d495ab4e0b468285a1f85 is 45, key is default/info:d/1733077081498/Put/seqid=0 2024-12-01T18:19:08,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 
is added to blk_1073741875_1051 (size=5037) 2024-12-01T18:19:08,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741875_1051 (size=5037) 2024-12-01T18:19:08,295 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/namespace/ef4cc29d327f6d110395be9a9161ded7/.tmp/info/9692871f7d6d495ab4e0b468285a1f85 2024-12-01T18:19:08,300 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/namespace/ef4cc29d327f6d110395be9a9161ded7/.tmp/info/9692871f7d6d495ab4e0b468285a1f85 as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/namespace/ef4cc29d327f6d110395be9a9161ded7/info/9692871f7d6d495ab4e0b468285a1f85 2024-12-01T18:19:08,305 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/namespace/ef4cc29d327f6d110395be9a9161ded7/info/9692871f7d6d495ab4e0b468285a1f85, entries=2, sequenceid=6, filesize=4.9 K 2024-12-01T18:19:08,306 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for ef4cc29d327f6d110395be9a9161ded7 in 32ms, sequenceid=6, compaction requested=false 2024-12-01T18:19:08,306 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for ef4cc29d327f6d110395be9a9161ded7: 2024-12-01T18:19:08,306 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 571d6e6376c1f1f14824532620919ac4: 2024-12-01T18:19:08,306 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C41383%2C1733077080285.1733077148306 2024-12-01T18:19:08,312 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285/b8365d49b74c%2C41383%2C1733077080285.1733077148220 with entries=4, filesize=1.22 KB; new WAL /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285/b8365d49b74c%2C41383%2C1733077080285.1733077148306 2024-12-01T18:19:08,312 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38401:38401),(127.0.0.1/127.0.0.1:46197:46197)] 2024-12-01T18:19:08,312 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285/b8365d49b74c%2C41383%2C1733077080285.1733077148220 is not closed yet, will try archiving it next time 2024-12-01T18:19:08,313 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285/b8365d49b74c%2C41383%2C1733077080285.1733077080663 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/oldWALs/b8365d49b74c%2C41383%2C1733077080285.1733077080663 2024-12-01T18:19:08,313 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T18:19:08,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741872_1048 (size=1255) 2024-12-01T18:19:08,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741872_1048 (size=1255) 2024-12-01T18:19:08,315 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285/b8365d49b74c%2C41383%2C1733077080285.1733077148220 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/oldWALs/b8365d49b74c%2C41383%2C1733077080285.1733077148220 2024-12-01T18:19:08,413 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-01T18:19:08,414 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-01T18:19:08,414 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bfbd5bb to 127.0.0.1:52110 2024-12-01T18:19:08,414 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:19:08,414 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T18:19:08,414 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1842841075, stopped=false 2024-12-01T18:19:08,414 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b8365d49b74c,33181,1733077080238 2024-12-01T18:19:08,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:19:08,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:19:08,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:08,416 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-01T18:19:08,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:08,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:19:08,417 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,41383,1733077080285' ***** 2024-12-01T18:19:08,417 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-01T18:19:08,417 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:19:08,417 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-01T18:19:08,417 INFO [RS:0;b8365d49b74c:41383 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T18:19:08,417 INFO [RS:0;b8365d49b74c:41383 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-01T18:19:08,417 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(3579): Received CLOSE for 0d069298e0443478925bd2f47d58cef2 2024-12-01T18:19:08,417 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:19:08,417 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 0d069298e0443478925bd2f47d58cef2, disabling compactions & flushes 2024-12-01T18:19:08,417 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 2024-12-01T18:19:08,417 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 2024-12-01T18:19:08,417 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(3579): Received CLOSE for ef4cc29d327f6d110395be9a9161ded7 2024-12-01T18:19:08,417 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. after waiting 0 ms 2024-12-01T18:19:08,417 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 2024-12-01T18:19:08,417 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(3579): Received CLOSE for 571d6e6376c1f1f14824532620919ac4 2024-12-01T18:19:08,418 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,41383,1733077080285 2024-12-01T18:19:08,418 DEBUG [RS:0;b8365d49b74c:41383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:19:08,418 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:19:08,418 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:19:08,418 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-01T18:19:08,418 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-01T18:19:08,418 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:19:08,418 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1599): Waiting on 4 regions to close 2024-12-01T18:19:08,418 DEBUG [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1603): Online Regions={0d069298e0443478925bd2f47d58cef2=TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2., 1588230740=hbase:meta,,1.1588230740, ef4cc29d327f6d110395be9a9161ded7=hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7., 571d6e6376c1f1f14824532620919ac4=TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4.} 2024-12-01T18:19:08,418 DEBUG [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1629): Waiting on 0d069298e0443478925bd2f47d58cef2, 1588230740, 571d6e6376c1f1f14824532620919ac4, ef4cc29d327f6d110395be9a9161ded7 2024-12-01T18:19:08,418 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:19:08,418 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:19:08,418 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:19:08,418 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:19:08,418 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:19:08,418 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c->hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/6ff3197a34204987bf311c2e692858df-top, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-373ae11c663c475cae0a2c8463897de9, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/396fb8eb69b44942af36b57ecb934481, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-1edf9483c56c400992dfbf975dd84516, 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/cd7a3337a35343338dd181be4e0ba657, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/e45695a84add48e38c755c5c0911718d, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/afeb2743e2e54c79b87dc76ad54102a6, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ce3f7c37382a4ea8847d26e7ac182763, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/e0e505465b5b4a7980fce95342d9033e, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/3b1dc617f7924078aef82ef19131e8a5, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/f347a07ff22e42e1a87a959bf6bc745d, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/8ce60136ece94fceb3a394c45e3d2fb0, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ee0bab7e535246408b43c23d784556ea, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/2ec6d033ebcd4b86b49d353e41a7a018, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/8b41f6e85e4246518fa91f83bc2111cb, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/eff185343b194fe78d39758252b1662b, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/d298da5fed6047ad8c39571d7bb19110, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/04f8596ece124cee83deef307f8b6392] to archive 2024-12-01T18:19:08,420 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-01T18:19:08,421 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:19:08,423 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-373ae11c663c475cae0a2c8463897de9 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-373ae11c663c475cae0a2c8463897de9 2024-12-01T18:19:08,425 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/396fb8eb69b44942af36b57ecb934481 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/396fb8eb69b44942af36b57ecb934481 2024-12-01T18:19:08,426 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-1edf9483c56c400992dfbf975dd84516 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/TestLogRolling-testLogRolling=b658ffcf569fa9b7017ef7c6129a394c-1edf9483c56c400992dfbf975dd84516 2024-12-01T18:19:08,427 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/meta/1588230740/recovered.edits/27.seqid, newMaxSeqId=27, maxSeqId=1 2024-12-01T18:19:08,428 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T18:19:08,428 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/cd7a3337a35343338dd181be4e0ba657 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/cd7a3337a35343338dd181be4e0ba657 2024-12-01T18:19:08,428 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-01T18:19:08,428 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:19:08,428 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T18:19:08,429 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/e45695a84add48e38c755c5c0911718d to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/e45695a84add48e38c755c5c0911718d 2024-12-01T18:19:08,430 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/afeb2743e2e54c79b87dc76ad54102a6 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/afeb2743e2e54c79b87dc76ad54102a6 2024-12-01T18:19:08,432 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ce3f7c37382a4ea8847d26e7ac182763 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ce3f7c37382a4ea8847d26e7ac182763 2024-12-01T18:19:08,433 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/e0e505465b5b4a7980fce95342d9033e to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/e0e505465b5b4a7980fce95342d9033e 2024-12-01T18:19:08,434 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/3b1dc617f7924078aef82ef19131e8a5 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/3b1dc617f7924078aef82ef19131e8a5 2024-12-01T18:19:08,435 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/f347a07ff22e42e1a87a959bf6bc745d to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/f347a07ff22e42e1a87a959bf6bc745d 2024-12-01T18:19:08,436 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/8ce60136ece94fceb3a394c45e3d2fb0 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/8ce60136ece94fceb3a394c45e3d2fb0 2024-12-01T18:19:08,438 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ee0bab7e535246408b43c23d784556ea to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/ee0bab7e535246408b43c23d784556ea 2024-12-01T18:19:08,439 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/2ec6d033ebcd4b86b49d353e41a7a018 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/2ec6d033ebcd4b86b49d353e41a7a018 2024-12-01T18:19:08,440 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/8b41f6e85e4246518fa91f83bc2111cb to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/8b41f6e85e4246518fa91f83bc2111cb 2024-12-01T18:19:08,441 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/eff185343b194fe78d39758252b1662b to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/eff185343b194fe78d39758252b1662b 2024-12-01T18:19:08,442 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/d298da5fed6047ad8c39571d7bb19110 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/d298da5fed6047ad8c39571d7bb19110 2024-12-01T18:19:08,443 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/04f8596ece124cee83deef307f8b6392 to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/info/04f8596ece124cee83deef307f8b6392 2024-12-01T18:19:08,447 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/0d069298e0443478925bd2f47d58cef2/recovered.edits/328.seqid, newMaxSeqId=328, maxSeqId=123 2024-12-01T18:19:08,447 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 2024-12-01T18:19:08,447 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 0d069298e0443478925bd2f47d58cef2: 2024-12-01T18:19:08,447 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733077105855.0d069298e0443478925bd2f47d58cef2. 2024-12-01T18:19:08,447 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing ef4cc29d327f6d110395be9a9161ded7, disabling compactions & flushes 2024-12-01T18:19:08,448 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 2024-12-01T18:19:08,448 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 
2024-12-01T18:19:08,448 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. after waiting 0 ms 2024-12-01T18:19:08,448 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 2024-12-01T18:19:08,450 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/hbase/namespace/ef4cc29d327f6d110395be9a9161ded7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T18:19:08,451 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 2024-12-01T18:19:08,451 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for ef4cc29d327f6d110395be9a9161ded7: 2024-12-01T18:19:08,451 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733077081087.ef4cc29d327f6d110395be9a9161ded7. 2024-12-01T18:19:08,451 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 571d6e6376c1f1f14824532620919ac4, disabling compactions & flushes 2024-12-01T18:19:08,451 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4. 2024-12-01T18:19:08,451 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4. 2024-12-01T18:19:08,451 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4. after waiting 0 ms 2024-12-01T18:19:08,451 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4. 2024-12-01T18:19:08,451 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c->hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/b658ffcf569fa9b7017ef7c6129a394c/info/6ff3197a34204987bf311c2e692858df-bottom] to archive 2024-12-01T18:19:08,452 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-01T18:19:08,453 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c to hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/archive/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/info/6ff3197a34204987bf311c2e692858df.b658ffcf569fa9b7017ef7c6129a394c 2024-12-01T18:19:08,456 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/data/default/TestLogRolling-testLogRolling/571d6e6376c1f1f14824532620919ac4/recovered.edits/128.seqid, newMaxSeqId=128, maxSeqId=123 2024-12-01T18:19:08,457 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4. 2024-12-01T18:19:08,457 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 571d6e6376c1f1f14824532620919ac4: 2024-12-01T18:19:08,457 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733077105855.571d6e6376c1f1f14824532620919ac4. 2024-12-01T18:19:08,532 INFO [regionserver/b8365d49b74c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:19:08,558 INFO [regionserver/b8365d49b74c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-01T18:19:08,558 INFO [regionserver/b8365d49b74c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-01T18:19:08,602 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-01T18:19:08,618 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,41383,1733077080285; all regions closed. 
2024-12-01T18:19:08,619 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285 2024-12-01T18:19:08,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741834_1010 (size=9351) 2024-12-01T18:19:08,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741834_1010 (size=9351) 2024-12-01T18:19:08,623 DEBUG [RS:0;b8365d49b74c:41383 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/oldWALs 2024-12-01T18:19:08,623 INFO [RS:0;b8365d49b74c:41383 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b8365d49b74c%2C41383%2C1733077080285.meta:.meta(num 1733077081044) 2024-12-01T18:19:08,623 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/WALs/b8365d49b74c,41383,1733077080285 2024-12-01T18:19:08,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741876_1052 (size=1072) 2024-12-01T18:19:08,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741876_1052 (size=1072) 2024-12-01T18:19:08,627 DEBUG [RS:0;b8365d49b74c:41383 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/oldWALs 2024-12-01T18:19:08,627 INFO [RS:0;b8365d49b74c:41383 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b8365d49b74c%2C41383%2C1733077080285:(num 1733077148306) 2024-12-01T18:19:08,627 DEBUG [RS:0;b8365d49b74c:41383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:19:08,627 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:19:08,627 INFO [RS:0;b8365d49b74c:41383 {}] hbase.ChoreService(370): Chore service for: regionserver/b8365d49b74c:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-01T18:19:08,627 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-01T18:19:08,628 INFO [RS:0;b8365d49b74c:41383 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41383 2024-12-01T18:19:08,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:19:08,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b8365d49b74c,41383,1733077080285 2024-12-01T18:19:08,630 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b8365d49b74c,41383,1733077080285] 2024-12-01T18:19:08,630 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b8365d49b74c,41383,1733077080285; numProcessing=1 2024-12-01T18:19:08,633 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b8365d49b74c,41383,1733077080285 already deleted, retry=false 2024-12-01T18:19:08,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-01T18:19:08,633 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b8365d49b74c,41383,1733077080285 expired; onlineServers=0 2024-12-01T18:19:08,633 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,33181,1733077080238' ***** 2024-12-01T18:19:08,633 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T18:19:08,633 DEBUG [M:0;b8365d49b74c:33181 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51c11098, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:19:08,633 INFO [M:0;b8365d49b74c:33181 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,33181,1733077080238 2024-12-01T18:19:08,633 INFO [M:0;b8365d49b74c:33181 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,33181,1733077080238; all regions closed. 2024-12-01T18:19:08,633 DEBUG [M:0;b8365d49b74c:33181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:19:08,633 DEBUG [M:0;b8365d49b74c:33181 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T18:19:08,634 DEBUG [M:0;b8365d49b74c:33181 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T18:19:08,634 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-01T18:19:08,634 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733077080444 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733077080444,5,FailOnTimeoutGroup] 2024-12-01T18:19:08,634 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733077080445 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733077080445,5,FailOnTimeoutGroup] 2024-12-01T18:19:08,634 INFO [M:0;b8365d49b74c:33181 {}] hbase.ChoreService(370): Chore service for: master/b8365d49b74c:0 had [] on shutdown 2024-12-01T18:19:08,634 DEBUG [M:0;b8365d49b74c:33181 {}] master.HMaster(1733): Stopping service threads 2024-12-01T18:19:08,634 INFO [M:0;b8365d49b74c:33181 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T18:19:08,634 INFO [M:0;b8365d49b74c:33181 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T18:19:08,634 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-01T18:19:08,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T18:19:08,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:08,635 DEBUG [M:0;b8365d49b74c:33181 {}] zookeeper.ZKUtil(347): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T18:19:08,635 WARN [M:0;b8365d49b74c:33181 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T18:19:08,635 INFO [M:0;b8365d49b74c:33181 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-01T18:19:08,635 INFO [M:0;b8365d49b74c:33181 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T18:19:08,635 DEBUG [M:0;b8365d49b74c:33181 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:19:08,635 INFO [M:0;b8365d49b74c:33181 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:19:08,635 DEBUG [M:0;b8365d49b74c:33181 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:19:08,635 DEBUG [M:0;b8365d49b74c:33181 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:19:08,635 DEBUG [M:0;b8365d49b74c:33181 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T18:19:08,635 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:19:08,636 INFO [M:0;b8365d49b74c:33181 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=66.45 KB heapSize=81.70 KB 2024-12-01T18:19:08,651 DEBUG [M:0;b8365d49b74c:33181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cf5ee889cd3b40a79cd69dbd396d323e is 82, key is hbase:meta,,1/info:regioninfo/1733077081066/Put/seqid=0 2024-12-01T18:19:08,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741877_1053 (size=5672) 2024-12-01T18:19:08,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741877_1053 (size=5672) 2024-12-01T18:19:08,657 INFO [M:0;b8365d49b74c:33181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cf5ee889cd3b40a79cd69dbd396d323e 2024-12-01T18:19:08,675 DEBUG [M:0;b8365d49b74c:33181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d64d4fdbc41642e1b8214cb11f144b51 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733077081971/Put/seqid=0 2024-12-01T18:19:08,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741878_1054 (size=7286) 2024-12-01T18:19:08,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741878_1054 (size=7286) 2024-12-01T18:19:08,681 INFO [M:0;b8365d49b74c:33181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.85 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d64d4fdbc41642e1b8214cb11f144b51 2024-12-01T18:19:08,685 INFO [M:0;b8365d49b74c:33181 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d64d4fdbc41642e1b8214cb11f144b51 2024-12-01T18:19:08,700 DEBUG [M:0;b8365d49b74c:33181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/75e821fe6f8044e6987dde7cbbf4a0cb is 69, key is b8365d49b74c,41383,1733077080285/rs:state/1733077080519/Put/seqid=0 2024-12-01T18:19:08,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741879_1055 (size=5156) 2024-12-01T18:19:08,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741879_1055 (size=5156) 2024-12-01T18:19:08,705 INFO [M:0;b8365d49b74c:33181 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/75e821fe6f8044e6987dde7cbbf4a0cb 2024-12-01T18:19:08,731 DEBUG [M:0;b8365d49b74c:33181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/585e94b6e07c4948a33b88a695940faa is 52, key is load_balancer_on/state:d/1733077081610/Put/seqid=0 2024-12-01T18:19:08,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:19:08,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x1004ecd51850001, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:19:08,732 INFO [RS:0;b8365d49b74c:41383 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,41383,1733077080285; zookeeper connection closed. 2024-12-01T18:19:08,732 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6cfc693e {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6cfc693e 2024-12-01T18:19:08,733 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-01T18:19:08,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741880_1056 (size=5056) 2024-12-01T18:19:08,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741880_1056 (size=5056) 2024-12-01T18:19:08,737 INFO [M:0;b8365d49b74c:33181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/585e94b6e07c4948a33b88a695940faa 2024-12-01T18:19:08,742 DEBUG [M:0;b8365d49b74c:33181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cf5ee889cd3b40a79cd69dbd396d323e as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cf5ee889cd3b40a79cd69dbd396d323e 2024-12-01T18:19:08,746 INFO [M:0;b8365d49b74c:33181 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cf5ee889cd3b40a79cd69dbd396d323e, entries=8, sequenceid=164, filesize=5.5 K 2024-12-01T18:19:08,747 DEBUG [M:0;b8365d49b74c:33181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d64d4fdbc41642e1b8214cb11f144b51 as 
hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d64d4fdbc41642e1b8214cb11f144b51 2024-12-01T18:19:08,751 INFO [M:0;b8365d49b74c:33181 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d64d4fdbc41642e1b8214cb11f144b51 2024-12-01T18:19:08,751 INFO [M:0;b8365d49b74c:33181 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d64d4fdbc41642e1b8214cb11f144b51, entries=18, sequenceid=164, filesize=7.1 K 2024-12-01T18:19:08,752 DEBUG [M:0;b8365d49b74c:33181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/75e821fe6f8044e6987dde7cbbf4a0cb as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/75e821fe6f8044e6987dde7cbbf4a0cb 2024-12-01T18:19:08,756 INFO [M:0;b8365d49b74c:33181 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/75e821fe6f8044e6987dde7cbbf4a0cb, entries=1, sequenceid=164, filesize=5.0 K 2024-12-01T18:19:08,757 DEBUG [M:0;b8365d49b74c:33181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/585e94b6e07c4948a33b88a695940faa as hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/585e94b6e07c4948a33b88a695940faa 2024-12-01T18:19:08,761 INFO [M:0;b8365d49b74c:33181 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37907/user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/585e94b6e07c4948a33b88a695940faa, entries=1, sequenceid=164, filesize=4.9 K 2024-12-01T18:19:08,761 INFO [M:0;b8365d49b74c:33181 {}] regionserver.HRegion(3040): Finished flush of dataSize ~66.45 KB/68043, heapSize ~81.63 KB/83592, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=164, compaction requested=false 2024-12-01T18:19:08,763 INFO [M:0;b8365d49b74c:33181 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T18:19:08,763 DEBUG [M:0;b8365d49b74c:33181 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:19:08,763 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/14f76499-f00b-b6eb-2857-55e044859171/MasterData/WALs/b8365d49b74c,33181,1733077080238 2024-12-01T18:19:08,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45821 is added to blk_1073741830_1006 (size=79272) 2024-12-01T18:19:08,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41531 is added to blk_1073741830_1006 (size=79272) 2024-12-01T18:19:08,765 INFO [M:0;b8365d49b74c:33181 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-01T18:19:08,765 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-01T18:19:08,766 INFO [M:0;b8365d49b74c:33181 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33181 2024-12-01T18:19:08,768 DEBUG [M:0;b8365d49b74c:33181 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b8365d49b74c,33181,1733077080238 already deleted, retry=false 2024-12-01T18:19:08,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:19:08,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33181-0x1004ecd51850000, quorum=127.0.0.1:52110, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:19:08,869 INFO [M:0;b8365d49b74c:33181 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,33181,1733077080238; zookeeper connection closed. 
2024-12-01T18:19:08,872 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@434b09e4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:19:08,872 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17b5bb29{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:19:08,872 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:19:08,872 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4be9feb9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:19:08,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33dd8821{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/hadoop.log.dir/,STOPPED} 2024-12-01T18:19:08,874 WARN [BP-487273436-172.17.0.2-1733077079579 heartbeating to localhost/127.0.0.1:37907 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:19:08,874 WARN [BP-487273436-172.17.0.2-1733077079579 heartbeating to localhost/127.0.0.1:37907 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-487273436-172.17.0.2-1733077079579 (Datanode Uuid 5b572541-fb21-4c36-919e-521eec6cafa6) service to localhost/127.0.0.1:37907 2024-12-01T18:19:08,874 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:19:08,874 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:19:08,875 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147/dfs/data/data3/current/BP-487273436-172.17.0.2-1733077079579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:19:08,875 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147/dfs/data/data4/current/BP-487273436-172.17.0.2-1733077079579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:19:08,875 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:19:08,877 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4d11ec04{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:19:08,877 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17c78fce{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:19:08,877 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:19:08,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bf6e2c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:19:08,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@555a9157{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/hadoop.log.dir/,STOPPED} 2024-12-01T18:19:08,879 WARN [BP-487273436-172.17.0.2-1733077079579 heartbeating to localhost/127.0.0.1:37907 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:19:08,879 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:19:08,879 WARN [BP-487273436-172.17.0.2-1733077079579 heartbeating to localhost/127.0.0.1:37907 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-487273436-172.17.0.2-1733077079579 (Datanode Uuid 4b59fa24-905f-4b4f-95e8-423bc64e71a5) service to localhost/127.0.0.1:37907 2024-12-01T18:19:08,879 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:19:08,880 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147/dfs/data/data1/current/BP-487273436-172.17.0.2-1733077079579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:19:08,880 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/cluster_c8794203-4bde-4c2b-95b3-c3db3f2b7147/dfs/data/data2/current/BP-487273436-172.17.0.2-1733077079579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:19:08,880 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:19:08,887 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@34cb6972{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:19:08,887 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@191bd0c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:19:08,887 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:19:08,887 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7cad81e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:19:08,887 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1297c717{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/hadoop.log.dir/,STOPPED} 2024-12-01T18:19:08,898 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-01T18:19:08,934 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-01T18:19:08,942 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=124 (was 111) - Thread LEAK? -, OpenFileDescriptor=490 (was 466) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=127 (was 114) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2845 (was 2881) 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=124, OpenFileDescriptor=490, MaxFileDescriptor=1048576, SystemLoadAverage=127, ProcessCount=11, AvailableMemoryMB=2844 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/hadoop.log.dir so I do NOT create it in target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/37ba0588-8bb7-4065-1501-aef12ded695d/hadoop.tmp.dir so I do NOT create it in target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee, deleteOnExit=true 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/test.cache.data in system properties and HBase conf 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/hadoop.log.dir in system properties and HBase conf 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T18:19:08,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-01T18:19:08,950 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/nfs.dump.dir in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/java.io.tmpdir in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T18:19:08,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T18:19:08,964 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:19:09,058 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:19:09,064 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:19:09,066 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:19:09,066 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:19:09,066 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:19:09,079 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:19:09,080 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cd921e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:19:09,080 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75dc32b9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:19:09,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b34cff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/java.io.tmpdir/jetty-localhost-37775-hadoop-hdfs-3_4_1-tests_jar-_-any-17538577291564420885/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:19:09,212 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20acf7e0{HTTP/1.1, (http/1.1)}{localhost:37775} 2024-12-01T18:19:09,212 INFO [Time-limited test {}] server.Server(415): Started @356505ms 2024-12-01T18:19:09,225 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-01T18:19:09,281 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:19:09,284 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:19:09,284 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:19:09,284 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:19:09,284 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:19:09,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e5c24a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:19:09,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f8c3143{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:19:09,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@84ee776{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/java.io.tmpdir/jetty-localhost-33133-hadoop-hdfs-3_4_1-tests_jar-_-any-4738363040359312779/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:19:09,398 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a055ed1{HTTP/1.1, (http/1.1)}{localhost:33133} 2024-12-01T18:19:09,398 INFO [Time-limited test {}] server.Server(415): Started @356692ms 2024-12-01T18:19:09,399 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:19:09,435 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:19:09,438 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:19:09,440 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:19:09,440 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:19:09,440 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:19:09,440 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f12d55f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:19:09,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70952693{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:19:09,480 WARN [Thread-2191 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee/dfs/data/data2/current/BP-1075919058-172.17.0.2-1733077148986/current, will proceed with Du for space computation calculation, 2024-12-01T18:19:09,480 WARN [Thread-2190 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee/dfs/data/data1/current/BP-1075919058-172.17.0.2-1733077148986/current, will proceed with Du for space computation calculation, 2024-12-01T18:19:09,508 WARN [Thread-2169 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:19:09,511 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3d6f4189b68ed5ed with lease ID 0xecf07515816aaab5: Processing first storage report for DS-60136da1-082b-44e5-a14c-2acd2f719ef9 from datanode DatanodeRegistration(127.0.0.1:46275, datanodeUuid=3dc3bdb3-cf47-49b0-a819-129739755a88, infoPort=35899, infoSecurePort=0, ipcPort=46337, storageInfo=lv=-57;cid=testClusterID;nsid=1419850557;c=1733077148986) 2024-12-01T18:19:09,511 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3d6f4189b68ed5ed with lease ID 0xecf07515816aaab5: from storage DS-60136da1-082b-44e5-a14c-2acd2f719ef9 node DatanodeRegistration(127.0.0.1:46275, datanodeUuid=3dc3bdb3-cf47-49b0-a819-129739755a88, infoPort=35899, infoSecurePort=0, ipcPort=46337, storageInfo=lv=-57;cid=testClusterID;nsid=1419850557;c=1733077148986), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T18:19:09,511 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3d6f4189b68ed5ed with lease ID 0xecf07515816aaab5: Processing first storage report for DS-4ba9532e-7c4a-4ae2-800e-ca7df7ebefa0 from datanode DatanodeRegistration(127.0.0.1:46275, datanodeUuid=3dc3bdb3-cf47-49b0-a819-129739755a88, infoPort=35899, infoSecurePort=0, ipcPort=46337, storageInfo=lv=-57;cid=testClusterID;nsid=1419850557;c=1733077148986) 2024-12-01T18:19:09,511 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3d6f4189b68ed5ed with lease ID 0xecf07515816aaab5: from storage DS-4ba9532e-7c4a-4ae2-800e-ca7df7ebefa0 node DatanodeRegistration(127.0.0.1:46275, datanodeUuid=3dc3bdb3-cf47-49b0-a819-129739755a88, infoPort=35899, infoSecurePort=0, ipcPort=46337, storageInfo=lv=-57;cid=testClusterID;nsid=1419850557;c=1733077148986), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:19:09,562 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4cf2aa1e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/java.io.tmpdir/jetty-localhost-39293-hadoop-hdfs-3_4_1-tests_jar-_-any-1497484438568945326/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:19:09,563 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@440c487f{HTTP/1.1, (http/1.1)}{localhost:39293} 2024-12-01T18:19:09,563 INFO [Time-limited test {}] server.Server(415): Started @356856ms 2024-12-01T18:19:09,564 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:19:09,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-01T18:19:09,663 WARN [Thread-2216 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee/dfs/data/data3/current/BP-1075919058-172.17.0.2-1733077148986/current, will proceed with Du for space computation calculation, 2024-12-01T18:19:09,663 WARN [Thread-2217 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee/dfs/data/data4/current/BP-1075919058-172.17.0.2-1733077148986/current, will proceed with Du for space computation calculation, 2024-12-01T18:19:09,679 WARN [Thread-2205 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:19:09,682 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd87e60e0ab34102f with lease ID 0xecf07515816aaab6: Processing first storage report for DS-b1f86b2b-b5c2-430d-8357-882d24260faa from datanode DatanodeRegistration(127.0.0.1:41897, datanodeUuid=b9245653-5c4c-48e2-8687-1f21a7668664, infoPort=33493, infoSecurePort=0, ipcPort=43119, storageInfo=lv=-57;cid=testClusterID;nsid=1419850557;c=1733077148986) 2024-12-01T18:19:09,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd87e60e0ab34102f with lease ID 0xecf07515816aaab6: from storage DS-b1f86b2b-b5c2-430d-8357-882d24260faa node DatanodeRegistration(127.0.0.1:41897, datanodeUuid=b9245653-5c4c-48e2-8687-1f21a7668664, infoPort=33493, infoSecurePort=0, ipcPort=43119, storageInfo=lv=-57;cid=testClusterID;nsid=1419850557;c=1733077148986), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:19:09,682 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd87e60e0ab34102f with lease ID 0xecf07515816aaab6: Processing first storage report for DS-4a7f6933-6b42-4976-aefe-49683f8bc490 from datanode DatanodeRegistration(127.0.0.1:41897, datanodeUuid=b9245653-5c4c-48e2-8687-1f21a7668664, infoPort=33493, infoSecurePort=0, ipcPort=43119, storageInfo=lv=-57;cid=testClusterID;nsid=1419850557;c=1733077148986) 2024-12-01T18:19:09,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd87e60e0ab34102f with lease ID 0xecf07515816aaab6: from storage DS-4a7f6933-6b42-4976-aefe-49683f8bc490 node DatanodeRegistration(127.0.0.1:41897, datanodeUuid=b9245653-5c4c-48e2-8687-1f21a7668664, infoPort=33493, infoSecurePort=0, ipcPort=43119, storageInfo=lv=-57;cid=testClusterID;nsid=1419850557;c=1733077148986), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:19:09,687 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff 2024-12-01T18:19:09,690 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee/zookeeper_0, clientPort=60348, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T18:19:09,691 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=60348 2024-12-01T18:19:09,692 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
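The utility has just started a MiniZooKeeperCluster on clientPort=60348 and verified it with a stat-style check. A small sketch of connecting a plain ZooKeeper client to that port, assuming the standard ZooKeeper client API; the port is taken from the log and the latch-based wait is only illustrative:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Illustrative connect against the mini quorum logged above (127.0.0.1:60348).
    public class MiniZkConnectSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60348", 30000, event -> {
          // SyncConnected is the same state the ZKWatcher entries below report.
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // Roughly what the 'stat' check amounts to: prove the server answers requests.
        System.out.println(zk.getChildren("/", false));
        zk.close();
      }
    }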
2024-12-01T18:19:09,693 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:19:09,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:19:09,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:19:09,703 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d with version=8 2024-12-01T18:19:09,703 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39817/user/jenkins/test-data/5a0691b9-8a31-578c-ede0-bfc8b5e03d12/hbase-staging 2024-12-01T18:19:09,705 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:19:09,705 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:19:09,705 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:19:09,705 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:19:09,705 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:19:09,705 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:19:09,705 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:19:09,705 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:19:09,706 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34251 2024-12-01T18:19:09,706 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:19:09,707 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:19:09,709 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:34251 connecting to ZooKeeper ensemble=127.0.0.1:60348 2024-12-01T18:19:09,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:342510x0, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:19:09,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34251-0x1004ece60df0000 connected 2024-12-01T18:19:09,732 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:19:09,733 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:19:09,733 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:19:09,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34251 2024-12-01T18:19:09,738 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34251 2024-12-01T18:19:09,739 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34251 2024-12-01T18:19:09,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34251 2024-12-01T18:19:09,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34251 2024-12-01T18:19:09,744 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d, hbase.cluster.distributed=false 2024-12-01T18:19:09,759 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b8365d49b74c:0 server-side Connection retries=45 2024-12-01T18:19:09,760 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:19:09,760 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:19:09,760 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:19:09,760 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:19:09,760 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:19:09,760 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:19:09,760 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 
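The repeated "Set watcher on znode that does not yet exist" entries mean an existence watch was registered so the process is notified once /hbase/master, /hbase/running or /hbase/acl is created. Expressed with the raw ZooKeeper API (paths taken from the log; a connected session zk is assumed), a sketch:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    // Existence watch on a znode that may not exist yet, as the ZKUtil(113) entries describe.
    public final class ExistenceWatchSketch {
      static void watchMaster(ZooKeeper zk) throws Exception {
        Watcher watcher = (WatchedEvent event) -> {
          // Fires with type=NodeCreated once the active master registers itself,
          // matching the "type=NodeCreated, path=/hbase/master" events further down.
          System.out.println(event.getType() + " on " + event.getPath());
        };
        Stat stat = zk.exists("/hbase/master", watcher); // returns null, but the watch is registered
        System.out.println("exists already? " + (stat != null));
      }
    }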
2024-12-01T18:19:09,762 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:43869 2024-12-01T18:19:09,762 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:19:09,763 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:19:09,763 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:19:09,765 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:19:09,767 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:43869 connecting to ZooKeeper ensemble=127.0.0.1:60348 2024-12-01T18:19:09,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:438690x0, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:19:09,770 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43869-0x1004ece60df0001 connected 2024-12-01T18:19:09,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:19:09,771 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:19:09,771 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:19:09,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43869 2024-12-01T18:19:09,776 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43869 2024-12-01T18:19:09,779 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43869 2024-12-01T18:19:09,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43869 2024-12-01T18:19:09,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43869 2024-12-01T18:19:09,782 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b8365d49b74c,34251,1733077149704 2024-12-01T18:19:09,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:19:09,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-01T18:19:09,786 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b8365d49b74c,34251,1733077149704 2024-12-01T18:19:09,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:19:09,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:19:09,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:09,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:09,789 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:19:09,789 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b8365d49b74c,34251,1733077149704 from backup master directory 2024-12-01T18:19:09,789 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:19:09,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b8365d49b74c,34251,1733077149704 2024-12-01T18:19:09,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:19:09,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:19:09,792 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
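The sequence above (register under /hbase/backup-masters, watch /hbase/master, then delete the backup entry after winning the race) is the usual ephemeral-znode election pattern. A simplified sketch of that pattern with the raw ZooKeeper API, assuming the parent znodes already exist; HBase's ActiveMasterManager adds more bookkeeping than shown here:

    import static java.nio.charset.StandardCharsets.UTF_8;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    // Simplified active/backup master election over ephemeral znodes.
    public final class MasterElectionSketch {
      static boolean tryBecomeActive(ZooKeeper zk, String serverName) throws Exception {
        byte[] data = serverName.getBytes(UTF_8);
        // Register as a backup first, mirroring /hbase/backup-masters/<server> above.
        zk.create("/hbase/backup-masters/" + serverName, data,
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        try {
          // Ephemeral /hbase/master: whoever creates it first is the active master.
          zk.create("/hbase/master", data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
          // Won the race, so drop the backup entry, as ActiveMasterManager(245) logs above.
          zk.delete("/hbase/backup-masters/" + serverName, -1);
          return true;
        } catch (KeeperException.NodeExistsException alreadyTaken) {
          return false; // another master is active; stay registered as a backup
        }
      }
    }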
2024-12-01T18:19:09,792 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b8365d49b74c,34251,1733077149704 2024-12-01T18:19:09,796 DEBUG [M:0;b8365d49b74c:34251 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b8365d49b74c:34251 2024-12-01T18:19:09,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:19:09,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:19:09,807 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/hbase.id with ID: 77a00a89-0b61-418d-b203-5e07405f3365 2024-12-01T18:19:09,817 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:19:09,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:09,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:09,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:19:09,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:19:09,829 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 
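The 'master:store' descriptor printed above (families info, proc, rs and state with their versions, block sizes, encodings and bloom filters) can be expressed through the public descriptor builders. A partial sketch rebuilding the 'info' and 'proc' families from the attributes in the log, assuming the standard HBase 2.x client builder API; the remaining two families follow the same pattern:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Rebuilds part of the 'master:store' descriptor logged by MasterRegion(372).
    public final class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8192 B
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                    // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)                              // BLOCKSIZE => 65536 B
                .build())
            .build();
      }
    }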
2024-12-01T18:19:09,830 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T18:19:09,830 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:19:09,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:19:09,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:19:09,841 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store 2024-12-01T18:19:09,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:19:09,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:19:09,847 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:19:09,847 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:19:09,847 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
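The WALFactory(183) entry above reports FSHLogProvider as the WAL provider, which corresponds to the 'filesystem' value of the hbase.wal.provider setting. A small configuration sketch, assuming the standard property name and built-in provider values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Selecting the WAL provider that the WALFactory(183) entry above reports.
    public final class WalProviderConfigSketch {
      static Configuration fsHLogConfig() {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" maps to FSHLogProvider; "asyncfs" would select AsyncFSWALProvider instead.
        conf.set("hbase.wal.provider", "filesystem");
        return conf;
      }
    }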
2024-12-01T18:19:09,847 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:19:09,847 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:19:09,847 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:19:09,847 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:19:09,848 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:19:09,848 WARN [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/.initializing 2024-12-01T18:19:09,848 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/WALs/b8365d49b74c,34251,1733077149704 2024-12-01T18:19:09,851 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C34251%2C1733077149704, suffix=, logDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/WALs/b8365d49b74c,34251,1733077149704, archiveDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/oldWALs, maxLogs=10 2024-12-01T18:19:09,851 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C34251%2C1733077149704.1733077149851 2024-12-01T18:19:09,861 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/WALs/b8365d49b74c,34251,1733077149704/b8365d49b74c%2C34251%2C1733077149704.1733077149851 2024-12-01T18:19:09,861 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35899:35899),(127.0.0.1/127.0.0.1:33493:33493)] 2024-12-01T18:19:09,861 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:19:09,861 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:19:09,861 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:19:09,861 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:19:09,863 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:19:09,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T18:19:09,865 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:09,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:19:09,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:19:09,866 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T18:19:09,866 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:09,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:19:09,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:19:09,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T18:19:09,868 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:09,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:19:09,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:19:09,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T18:19:09,869 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:09,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:19:09,870 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:19:09,870 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:19:09,872 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T18:19:09,873 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:19:09,875 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:19:09,875 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721731, jitterRate=-0.08227267861366272}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:19:09,876 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-01T18:19:09,876 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T18:19:09,879 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52de8e6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:19:09,880 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-01T18:19:09,880 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T18:19:09,880 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T18:19:09,880 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T18:19:09,881 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-01T18:19:09,881 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-01T18:19:09,881 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T18:19:09,883 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
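The repeated compactions.CompactionConfiguration entries above all report the stock selection settings (3 to 10 store files per minor compaction, ratio 1.2, throttle point 2684354560, 7-day major period with 0.5 jitter). A minimal sketch, assuming the standard HBase 2.x configuration keys (verify the exact names against the running version), of where those same values would normally be tuned:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
    public static Configuration compactionTuning() {
        // Starts from hbase-default.xml / hbase-site.xml on the classpath.
        Configuration conf = HBaseConfiguration.create();
        // 3 to 10 store files per minor compaction, as reported above.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Selection ratio ("ratio 1.200000") and off-peak ratio (5.0).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // Major compaction every 7 days (604800000 ms) with 0.5 jitter.
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
    }
}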
2024-12-01T18:19:09,884 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T18:19:09,885 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-01T18:19:09,885 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T18:19:09,886 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T18:19:09,887 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-01T18:19:09,887 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T18:19:09,888 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T18:19:09,890 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-01T18:19:09,890 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T18:19:09,893 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T18:19:09,894 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T18:19:09,895 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T18:19:09,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:19:09,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:19:09,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:09,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-01T18:19:09,897 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b8365d49b74c,34251,1733077149704, sessionid=0x1004ece60df0000, setting cluster-up flag (Was=false) 2024-12-01T18:19:09,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:09,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:09,907 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T18:19:09,908 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,34251,1733077149704 2024-12-01T18:19:09,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:09,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:09,915 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T18:19:09,916 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b8365d49b74c,34251,1733077149704 2024-12-01T18:19:09,918 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-01T18:19:09,918 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-01T18:19:09,918 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
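The StochasticLoadBalancer line above lists the parameters it loaded (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000 ms). A short sketch mirroring those values, assuming the usual hbase.master.balancer.stochastic.* keys read by this balancer (the exact key names are an assumption to verify):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerConfigSketch {
    public static Configuration balancerTuning() {
        Configuration conf = HBaseConfiguration.create();
        // Values mirrored from the "Loaded config" line above.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000L);
        return conf;
    }
}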
2024-12-01T18:19:09,918 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b8365d49b74c,34251,1733077149704 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T18:19:09,918 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:19:09,918 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:19:09,918 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:19:09,918 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b8365d49b74c:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:19:09,919 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b8365d49b74c:0, corePoolSize=10, maxPoolSize=10 2024-12-01T18:19:09,919 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:09,919 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:19:09,919 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:09,919 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733077179919 2024-12-01T18:19:09,920 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T18:19:09,920 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T18:19:09,920 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T18:19:09,920 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T18:19:09,920 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T18:19:09,920 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T18:19:09,920 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:19:09,920 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-01T18:19:09,920 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:09,920 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T18:19:09,920 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T18:19:09,921 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T18:19:09,921 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T18:19:09,921 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T18:19:09,921 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:09,921 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733077149921,5,FailOnTimeoutGroup] 2024-12-01T18:19:09,921 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733077149921,5,FailOnTimeoutGroup] 2024-12-01T18:19:09,921 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-01T18:19:09,921 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:19:09,921 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T18:19:09,921 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:09,921 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
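The FSTableDescriptors line above prints the full hbase:meta schema that the master writes out. For comparison only, a minimal sketch of how an equivalent column family would be declared with the public HBase 2.x client API; hbase:meta itself is created internally exactly as the log shows, and the table name here is invented for illustration:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
    public static TableDescriptor build() {
        // Mirrors the 'info' family attributes printed above:
        // VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "meta_like"))
            .setColumnFamily(info)
            .build();
    }
}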
2024-12-01T18:19:09,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:19:09,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741831_1007 (size=1039) 2024-12-01T18:19:09,928 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-01T18:19:09,928 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d 2024-12-01T18:19:09,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:19:09,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:19:09,934 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:19:09,935 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:19:09,936 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:19:09,936 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:09,937 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:19:09,937 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:19:09,938 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:19:09,938 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:09,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:19:09,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:19:09,939 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:19:09,939 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:09,939 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:19:09,940 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740 2024-12-01T18:19:09,940 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740 2024-12-01T18:19:09,942 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-01T18:19:09,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:19:09,945 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:19:09,945 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851428, jitterRate=0.08264759182929993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:19:09,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:19:09,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:19:09,945 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:19:09,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:19:09,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:19:09,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:19:09,946 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-01T18:19:09,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:19:09,947 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:19:09,947 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-01T18:19:09,947 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T18:19:09,948 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T18:19:09,948 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T18:19:09,994 DEBUG [RS:0;b8365d49b74c:43869 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b8365d49b74c:43869 2024-12-01T18:19:09,995 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1008): ClusterId : 77a00a89-0b61-418d-b203-5e07405f3365 2024-12-01T18:19:09,995 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:19:09,999 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:19:09,999 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:19:10,005 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:19:10,005 DEBUG [RS:0;b8365d49b74c:43869 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c52ab16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:19:10,005 DEBUG [RS:0;b8365d49b74c:43869 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63b605e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:19:10,005 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-01T18:19:10,005 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-01T18:19:10,005 DEBUG [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-01T18:19:10,006 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(3073): reportForDuty to master=b8365d49b74c,34251,1733077149704 with isa=b8365d49b74c/172.17.0.2:43869, startcode=1733077149759 2024-12-01T18:19:10,006 DEBUG [RS:0;b8365d49b74c:43869 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:19:10,008 INFO [RS-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40997, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:19:10,008 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34251 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,008 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34251 {}] master.ServerManager(486): Registering regionserver=b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,010 DEBUG [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d 2024-12-01T18:19:10,010 DEBUG [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:40897 2024-12-01T18:19:10,010 DEBUG [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-01T18:19:10,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:19:10,012 DEBUG [RS:0;b8365d49b74c:43869 {}] zookeeper.ZKUtil(111): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,012 WARN [RS:0;b8365d49b74c:43869 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
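The region server above reports for duty to master b8365d49b74c,34251 through the ZooKeeper quorum at 127.0.0.1:60348. A client would reach the same mini cluster through that quorum; a minimal sketch using the standard client API, taking the quorum host and client port from the log and assuming HBase 2.x Admin#getRegionServers is available:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port as they appear in the ZKWatcher lines above.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "60348");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Lists the region servers that have registered with the master.
            admin.getRegionServers().forEach(sn -> System.out.println(sn));
        }
    }
}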
2024-12-01T18:19:10,012 INFO [RS:0;b8365d49b74c:43869 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:19:10,013 DEBUG [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,013 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b8365d49b74c,43869,1733077149759] 2024-12-01T18:19:10,015 DEBUG [RS:0;b8365d49b74c:43869 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-01T18:19:10,015 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:19:10,017 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:19:10,017 INFO [RS:0;b8365d49b74c:43869 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:19:10,017 INFO [RS:0;b8365d49b74c:43869 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:10,017 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-01T18:19:10,018 INFO [RS:0;b8365d49b74c:43869 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b8365d49b74c:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b8365d49b74c:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:19:10,018 DEBUG [RS:0;b8365d49b74c:43869 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b8365d49b74c:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:19:10,023 INFO [RS:0;b8365d49b74c:43869 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:10,023 INFO [RS:0;b8365d49b74c:43869 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:10,023 INFO [RS:0;b8365d49b74c:43869 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:10,023 INFO [RS:0;b8365d49b74c:43869 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:10,023 INFO [RS:0;b8365d49b74c:43869 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,43869,1733077149759-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-01T18:19:10,037 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:19:10,037 INFO [RS:0;b8365d49b74c:43869 {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,43869,1733077149759-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:10,051 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.Replication(204): b8365d49b74c,43869,1733077149759 started 2024-12-01T18:19:10,051 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1767): Serving as b8365d49b74c,43869,1733077149759, RpcServer on b8365d49b74c/172.17.0.2:43869, sessionid=0x1004ece60df0001 2024-12-01T18:19:10,051 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:19:10,051 DEBUG [RS:0;b8365d49b74c:43869 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,051 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,43869,1733077149759' 2024-12-01T18:19:10,051 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:19:10,052 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:19:10,052 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:19:10,052 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:19:10,052 DEBUG [RS:0;b8365d49b74c:43869 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,052 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b8365d49b74c,43869,1733077149759' 2024-12-01T18:19:10,052 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:19:10,052 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:19:10,053 DEBUG [RS:0;b8365d49b74c:43869 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:19:10,053 INFO [RS:0;b8365d49b74c:43869 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:19:10,053 INFO [RS:0;b8365d49b74c:43869 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T18:19:10,099 WARN [b8365d49b74c:34251 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
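The procedure members above watch well-known znodes under the /hbase base node (for example /hbase/flush-table-proc/acquired and /hbase/online-snapshot/abort). A minimal read-only sketch of inspecting one of these znodes with the plain ZooKeeper client, using the quorum address from the log; the znode layout is an HBase implementation detail and may differ between versions:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeCheckSketch {
    public static void main(String[] args) throws Exception {
        // Quorum taken from the log; the 30 s session timeout is an arbitrary choice.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60348", 30_000, (WatchedEvent e) -> { });
        try {
            Stat stat = zk.exists("/hbase/online-snapshot/acquired", false);
            System.out.println(stat == null
                ? "znode absent"
                : "znode present, version=" + stat.getVersion());
        } finally {
            zk.close();
        }
    }
}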
2024-12-01T18:19:10,155 INFO [RS:0;b8365d49b74c:43869 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C43869%2C1733077149759, suffix=, logDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/b8365d49b74c,43869,1733077149759, archiveDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/oldWALs, maxLogs=32 2024-12-01T18:19:10,155 INFO [RS:0;b8365d49b74c:43869 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C43869%2C1733077149759.1733077150155 2024-12-01T18:19:10,161 INFO [RS:0;b8365d49b74c:43869 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/b8365d49b74c,43869,1733077149759/b8365d49b74c%2C43869%2C1733077149759.1733077150155 2024-12-01T18:19:10,161 DEBUG [RS:0;b8365d49b74c:43869 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35899:35899),(127.0.0.1/127.0.0.1:33493:33493)] 2024-12-01T18:19:10,349 DEBUG [b8365d49b74c:34251 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-01T18:19:10,349 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,350 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,43869,1733077149759, state=OPENING 2024-12-01T18:19:10,354 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T18:19:10,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:10,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:10,355 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b8365d49b74c,43869,1733077149759}] 2024-12-01T18:19:10,356 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:19:10,356 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:19:10,508 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,508 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T18:19:10,510 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52480, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T18:19:10,513 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-01T18:19:10,513 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:19:10,514 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b8365d49b74c%2C43869%2C1733077149759.meta, suffix=.meta, logDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/b8365d49b74c,43869,1733077149759, archiveDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/oldWALs, maxLogs=32 2024-12-01T18:19:10,515 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b8365d49b74c%2C43869%2C1733077149759.meta.1733077150515.meta 2024-12-01T18:19:10,521 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/b8365d49b74c,43869,1733077149759/b8365d49b74c%2C43869%2C1733077149759.meta.1733077150515.meta 2024-12-01T18:19:10,521 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33493:33493),(127.0.0.1/127.0.0.1:35899:35899)] 2024-12-01T18:19:10,521 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:19:10,522 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T18:19:10,522 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T18:19:10,522 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-01T18:19:10,522 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T18:19:10,522 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:19:10,522 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-01T18:19:10,522 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-01T18:19:10,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:19:10,524 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:19:10,524 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:10,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:19:10,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:19:10,525 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:19:10,526 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:10,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:19:10,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:19:10,527 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:19:10,527 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:10,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:19:10,528 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740 2024-12-01T18:19:10,529 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740 2024-12-01T18:19:10,530 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
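The FlushLargeStoresPolicy message above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the region falls back to memstore flush size divided by the number of families (16.0 M here). A sketch of setting that key explicitly on a user table descriptor; the table name is invented and the 16 MB value only mirrors the fallback reported in the log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Table-level override consulted by FlushLargeStoresPolicy;
            // 16 MB matches the fallback value printed above.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(16L * 1024 * 1024))
            .build();
    }
}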
2024-12-01T18:19:10,531 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-01T18:19:10,532 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870934, jitterRate=0.10745088756084442}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-01T18:19:10,532 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-01T18:19:10,533 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733077150508 2024-12-01T18:19:10,534 DEBUG [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T18:19:10,534 INFO [RS_OPEN_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-01T18:19:10,535 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,535 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b8365d49b74c,43869,1733077149759, state=OPEN 2024-12-01T18:19:10,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:19:10,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:19:10,540 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:19:10,540 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:19:10,541 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T18:19:10,541 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=b8365d49b74c,43869,1733077149759 in 185 msec 2024-12-01T18:19:10,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T18:19:10,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 594 msec 2024-12-01T18:19:10,545 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 626 msec 2024-12-01T18:19:10,545 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733077150545, completionTime=-1 2024-12-01T18:19:10,545 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-01T18:19:10,545 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-01T18:19:10,545 DEBUG [hconnection-0x1d53be08-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:19:10,546 INFO [RS-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52484, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:19:10,547 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-01T18:19:10,547 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733077210547 2024-12-01T18:19:10,547 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733077270547 2024-12-01T18:19:10,547 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec 2024-12-01T18:19:10,552 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,34251,1733077149704-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:10,552 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,34251,1733077149704-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:10,552 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,34251,1733077149704-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:10,552 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b8365d49b74c:34251, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:10,552 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T18:19:10,552 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
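TableNamespaceManager above creates the hbase:namespace system table because it is missing on first startup. User namespaces, by contrast, are created through the Admin API; a minimal sketch, with the namespace name invented for illustration and the connection details assumed to come from the client configuration:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
    public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
            // Creates a user namespace; the 'hbase' and 'default' namespaces already exist.
            admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
        }
    }
}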
2024-12-01T18:19:10,552 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:19:10,553 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-01T18:19:10,554 DEBUG [master/b8365d49b74c:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-01T18:19:10,554 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:19:10,554 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:10,555 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:19:10,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:19:10,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741835_1011 (size=358) 2024-12-01T18:19:10,564 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8dff54df69800b9dda3c10e113c6ab03, NAME => 'hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d 2024-12-01T18:19:10,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:19:10,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741836_1012 (size=42) 2024-12-01T18:19:10,570 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:19:10,570 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 8dff54df69800b9dda3c10e113c6ab03, disabling compactions & flushes 2024-12-01T18:19:10,570 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. 2024-12-01T18:19:10,570 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. 2024-12-01T18:19:10,570 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. after waiting 0 ms 2024-12-01T18:19:10,570 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. 2024-12-01T18:19:10,570 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. 2024-12-01T18:19:10,570 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8dff54df69800b9dda3c10e113c6ab03: 2024-12-01T18:19:10,571 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:19:10,572 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733077150571"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733077150571"}]},"ts":"1733077150571"} 2024-12-01T18:19:10,573 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-01T18:19:10,574 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:19:10,574 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733077150574"}]},"ts":"1733077150574"} 2024-12-01T18:19:10,575 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-01T18:19:10,578 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=8dff54df69800b9dda3c10e113c6ab03, ASSIGN}] 2024-12-01T18:19:10,579 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=8dff54df69800b9dda3c10e113c6ab03, ASSIGN 2024-12-01T18:19:10,579 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=8dff54df69800b9dda3c10e113c6ab03, ASSIGN; state=OFFLINE, location=b8365d49b74c,43869,1733077149759; forceNewPlan=false, retain=false 2024-12-01T18:19:10,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41701/user/jenkins/test-data/26e018a0-4938-b0a0-350e-4880ae5c5def/WALs/b8365d49b74c,33203,1733076911564/b8365d49b74c%2C33203%2C1733076911564.meta.1733076912389.meta 
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-01T18:19:10,730 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=8dff54df69800b9dda3c10e113c6ab03, regionState=OPENING, regionLocation=b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 8dff54df69800b9dda3c10e113c6ab03, server=b8365d49b74c,43869,1733077149759}] 2024-12-01T18:19:10,884 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,887 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03.
2024-12-01T18:19:10,887 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 8dff54df69800b9dda3c10e113c6ab03, NAME => 'hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:19:10,887 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 8dff54df69800b9dda3c10e113c6ab03 2024-12-01T18:19:10,888 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:19:10,888 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 8dff54df69800b9dda3c10e113c6ab03 2024-12-01T18:19:10,888 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 8dff54df69800b9dda3c10e113c6ab03 2024-12-01T18:19:10,889 INFO [StoreOpener-8dff54df69800b9dda3c10e113c6ab03-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8dff54df69800b9dda3c10e113c6ab03 2024-12-01T18:19:10,890 INFO [StoreOpener-8dff54df69800b9dda3c10e113c6ab03-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dff54df69800b9dda3c10e113c6ab03 columnFamilyName info 2024-12-01T18:19:10,890 DEBUG [StoreOpener-8dff54df69800b9dda3c10e113c6ab03-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:19:10,891 INFO [StoreOpener-8dff54df69800b9dda3c10e113c6ab03-1 {}] regionserver.HStore(327): Store=8dff54df69800b9dda3c10e113c6ab03/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:19:10,891 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/namespace/8dff54df69800b9dda3c10e113c6ab03 2024-12-01T18:19:10,891 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/namespace/8dff54df69800b9dda3c10e113c6ab03 2024-12-01T18:19:10,893 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 8dff54df69800b9dda3c10e113c6ab03 2024-12-01T18:19:10,895 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/namespace/8dff54df69800b9dda3c10e113c6ab03/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:19:10,895 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 8dff54df69800b9dda3c10e113c6ab03; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=853237, jitterRate=0.08494777977466583}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:19:10,895 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 8dff54df69800b9dda3c10e113c6ab03: 2024-12-01T18:19:10,896 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03., pid=6, masterSystemTime=1733077150883 2024-12-01T18:19:10,897 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. 2024-12-01T18:19:10,897 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. 
2024-12-01T18:19:10,898 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=8dff54df69800b9dda3c10e113c6ab03, regionState=OPEN, openSeqNum=2, regionLocation=b8365d49b74c,43869,1733077149759 2024-12-01T18:19:10,901 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T18:19:10,901 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 8dff54df69800b9dda3c10e113c6ab03, server=b8365d49b74c,43869,1733077149759 in 168 msec 2024-12-01T18:19:10,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T18:19:10,903 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=8dff54df69800b9dda3c10e113c6ab03, ASSIGN in 323 msec 2024-12-01T18:19:10,903 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:19:10,903 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733077150903"}]},"ts":"1733077150903"} 2024-12-01T18:19:10,904 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-01T18:19:10,907 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:19:10,908 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 355 msec 2024-12-01T18:19:10,954 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-01T18:19:10,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:19:10,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:10,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:10,960 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-01T18:19:10,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:19:10,969 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 9 msec 2024-12-01T18:19:10,971 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-01T18:19:10,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-01T18:19:10,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 7 msec 2024-12-01T18:19:10,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-01T18:19:10,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-01T18:19:10,989 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.197sec 2024-12-01T18:19:10,989 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T18:19:10,989 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T18:19:10,989 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T18:19:10,989 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-01T18:19:10,989 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T18:19:10,989 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,34251,1733077149704-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:19:10,989 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,34251,1733077149704-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T18:19:10,991 DEBUG [master/b8365d49b74c:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-01T18:19:10,991 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T18:19:10,991 INFO [master/b8365d49b74c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b8365d49b74c,34251,1733077149704-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T18:19:11,084 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x29c99297 to 127.0.0.1:60348 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@352fef5d 2024-12-01T18:19:11,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5edac22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:19:11,091 DEBUG [hconnection-0x5590d9e8-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:19:11,092 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:19:11,094 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b8365d49b74c,34251,1733077149704 2024-12-01T18:19:11,094 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:19:11,096 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-01T18:19:11,096 INFO [Time-limited test {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:19:11,098 INFO [Time-limited test {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/test.com,8080,1, archiveDir=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/oldWALs, maxLogs=32 2024-12-01T18:19:11,099 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733077151099 2024-12-01T18:19:11,104 INFO [Time-limited test {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/test.com,8080,1/test.com%2C8080%2C1.1733077151099 2024-12-01T18:19:11,104 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35899:35899),(127.0.0.1/127.0.0.1:33493:33493)] 2024-12-01T18:19:11,104 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733077151104 2024-12-01T18:19:11,114 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/test.com,8080,1/test.com%2C8080%2C1.1733077151099 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/test.com,8080,1/test.com%2C8080%2C1.1733077151104 2024-12-01T18:19:11,114 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35899:35899),(127.0.0.1/127.0.0.1:33493:33493)] 2024-12-01T18:19:11,114 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/test.com,8080,1/test.com%2C8080%2C1.1733077151099 is not closed yet, will try archiving it next time 2024-12-01T18:19:11,115 DEBUG [WAL-Shutdown-0 {}] 
wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/test.com,8080,1 2024-12-01T18:19:11,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741837_1013 (size=93) 2024-12-01T18:19:11,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741837_1013 (size=93) 2024-12-01T18:19:11,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741838_1014 (size=93) 2024-12-01T18:19:11,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741838_1014 (size=93) 2024-12-01T18:19:11,120 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/test.com,8080,1/test.com%2C8080%2C1.1733077151099 to hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/oldWALs/test.com%2C8080%2C1.1733077151099 2024-12-01T18:19:11,122 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/oldWALs 2024-12-01T18:19:11,122 INFO [Time-limited test {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733077151104) 2024-12-01T18:19:11,122 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-01T18:19:11,123 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x29c99297 to 127.0.0.1:60348 2024-12-01T18:19:11,123 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:19:11,123 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T18:19:11,123 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1621512685, stopped=false 2024-12-01T18:19:11,123 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b8365d49b74c,34251,1733077149704 2024-12-01T18:19:11,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:19:11,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:19:11,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:11,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:11,125 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-01T18:19:11,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:19:11,126 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43869-0x1004ece60df0001, 
quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:19:11,126 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,43869,1733077149759' ***** 2024-12-01T18:19:11,126 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:19:11,127 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-01T18:19:11,127 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:19:11,127 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-01T18:19:11,127 INFO [RS:0;b8365d49b74c:43869 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T18:19:11,127 INFO [RS:0;b8365d49b74c:43869 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T18:19:11,127 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(3579): Received CLOSE for 8dff54df69800b9dda3c10e113c6ab03 2024-12-01T18:19:11,127 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,43869,1733077149759 2024-12-01T18:19:11,127 DEBUG [RS:0;b8365d49b74c:43869 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:19:11,127 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:19:11,127 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:19:11,127 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T18:19:11,127 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-01T18:19:11,127 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 8dff54df69800b9dda3c10e113c6ab03, disabling compactions & flushes 2024-12-01T18:19:11,128 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. 2024-12-01T18:19:11,128 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. 2024-12-01T18:19:11,128 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. after waiting 0 ms 2024-12-01T18:19:11,128 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. 
2024-12-01T18:19:11,128 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 8dff54df69800b9dda3c10e113c6ab03 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-01T18:19:11,128 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-01T18:19:11,128 DEBUG [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 8dff54df69800b9dda3c10e113c6ab03=hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03.} 2024-12-01T18:19:11,128 DEBUG [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 8dff54df69800b9dda3c10e113c6ab03 2024-12-01T18:19:11,128 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:19:11,128 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-01T18:19:11,128 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-01T18:19:11,128 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:19:11,128 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:19:11,128 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=1.23 KB heapSize=2.87 KB 2024-12-01T18:19:11,144 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/namespace/8dff54df69800b9dda3c10e113c6ab03/.tmp/info/e072f7a685034d18b697abf7b177ea56 is 45, key is default/info:d/1733077150963/Put/seqid=0 2024-12-01T18:19:11,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741839_1015 (size=5037) 2024-12-01T18:19:11,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741839_1015 (size=5037) 2024-12-01T18:19:11,149 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/namespace/8dff54df69800b9dda3c10e113c6ab03/.tmp/info/e072f7a685034d18b697abf7b177ea56 2024-12-01T18:19:11,149 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/.tmp/info/5c4514716b8949e59e049ae5153aa666 is 143, key is hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03./info:regioninfo/1733077150898/Put/seqid=0 2024-12-01T18:19:11,154 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741840_1016 (size=6595) 2024-12-01T18:19:11,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741840_1016 (size=6595) 2024-12-01T18:19:11,155 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.14 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/.tmp/info/5c4514716b8949e59e049ae5153aa666 2024-12-01T18:19:11,155 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/namespace/8dff54df69800b9dda3c10e113c6ab03/.tmp/info/e072f7a685034d18b697abf7b177ea56 as hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/namespace/8dff54df69800b9dda3c10e113c6ab03/info/e072f7a685034d18b697abf7b177ea56 2024-12-01T18:19:11,159 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/namespace/8dff54df69800b9dda3c10e113c6ab03/info/e072f7a685034d18b697abf7b177ea56, entries=2, sequenceid=6, filesize=4.9 K 2024-12-01T18:19:11,160 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 8dff54df69800b9dda3c10e113c6ab03 in 32ms, sequenceid=6, compaction requested=false 2024-12-01T18:19:11,164 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/namespace/8dff54df69800b9dda3c10e113c6ab03/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T18:19:11,165 INFO [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. 2024-12-01T18:19:11,165 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 8dff54df69800b9dda3c10e113c6ab03: 2024-12-01T18:19:11,165 DEBUG [RS_CLOSE_REGION-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733077150552.8dff54df69800b9dda3c10e113c6ab03. 
2024-12-01T18:19:11,174 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/.tmp/table/5809ed57fc424573b6910be600a06b53 is 51, key is hbase:namespace/table:state/1733077150903/Put/seqid=0 2024-12-01T18:19:11,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741841_1017 (size=5242) 2024-12-01T18:19:11,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741841_1017 (size=5242) 2024-12-01T18:19:11,178 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=94 B at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/.tmp/table/5809ed57fc424573b6910be600a06b53 2024-12-01T18:19:11,183 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/.tmp/info/5c4514716b8949e59e049ae5153aa666 as hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/info/5c4514716b8949e59e049ae5153aa666 2024-12-01T18:19:11,187 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/info/5c4514716b8949e59e049ae5153aa666, entries=10, sequenceid=9, filesize=6.4 K 2024-12-01T18:19:11,188 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/.tmp/table/5809ed57fc424573b6910be600a06b53 as hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/table/5809ed57fc424573b6910be600a06b53 2024-12-01T18:19:11,192 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/table/5809ed57fc424573b6910be600a06b53, entries=2, sequenceid=9, filesize=5.1 K 2024-12-01T18:19:11,193 INFO [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~1.23 KB/1264, heapSize ~2.59 KB/2648, currentSize=0 B/0 for 1588230740 in 65ms, sequenceid=9, compaction requested=false 2024-12-01T18:19:11,196 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/data/hbase/meta/1588230740/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-12-01T18:19:11,197 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T18:19:11,197 INFO 
[RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-01T18:19:11,197 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-01T18:19:11,197 DEBUG [RS_CLOSE_META-regionserver/b8365d49b74c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T18:19:11,328 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,43869,1733077149759; all regions closed. 2024-12-01T18:19:11,328 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/b8365d49b74c,43869,1733077149759 2024-12-01T18:19:11,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741834_1010 (size=2484) 2024-12-01T18:19:11,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741834_1010 (size=2484) 2024-12-01T18:19:11,333 DEBUG [RS:0;b8365d49b74c:43869 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/oldWALs 2024-12-01T18:19:11,333 INFO [RS:0;b8365d49b74c:43869 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b8365d49b74c%2C43869%2C1733077149759.meta:.meta(num 1733077150515) 2024-12-01T18:19:11,333 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/WALs/b8365d49b74c,43869,1733077149759 2024-12-01T18:19:11,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741833_1009 (size=1414) 2024-12-01T18:19:11,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741833_1009 (size=1414) 2024-12-01T18:19:11,337 DEBUG [RS:0;b8365d49b74c:43869 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/oldWALs 2024-12-01T18:19:11,337 INFO [RS:0;b8365d49b74c:43869 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b8365d49b74c%2C43869%2C1733077149759:(num 1733077150155) 2024-12-01T18:19:11,337 DEBUG [RS:0;b8365d49b74c:43869 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:19:11,338 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:19:11,338 INFO [RS:0;b8365d49b74c:43869 {}] hbase.ChoreService(370): Chore service for: regionserver/b8365d49b74c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-01T18:19:11,338 INFO [regionserver/b8365d49b74c:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-01T18:19:11,338 INFO [RS:0;b8365d49b74c:43869 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:43869 2024-12-01T18:19:11,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b8365d49b74c,43869,1733077149759 2024-12-01T18:19:11,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:19:11,341 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b8365d49b74c,43869,1733077149759] 2024-12-01T18:19:11,341 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b8365d49b74c,43869,1733077149759; numProcessing=1 2024-12-01T18:19:11,343 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b8365d49b74c,43869,1733077149759 already deleted, retry=false 2024-12-01T18:19:11,343 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b8365d49b74c,43869,1733077149759 expired; onlineServers=0 2024-12-01T18:19:11,343 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b8365d49b74c,34251,1733077149704' ***** 2024-12-01T18:19:11,343 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T18:19:11,344 DEBUG [M:0;b8365d49b74c:34251 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ff3be40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b8365d49b74c/172.17.0.2:0 2024-12-01T18:19:11,344 INFO [M:0;b8365d49b74c:34251 {}] regionserver.HRegionServer(1224): stopping server b8365d49b74c,34251,1733077149704 2024-12-01T18:19:11,344 INFO [M:0;b8365d49b74c:34251 {}] regionserver.HRegionServer(1250): stopping server b8365d49b74c,34251,1733077149704; all regions closed. 2024-12-01T18:19:11,344 DEBUG [M:0;b8365d49b74c:34251 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:19:11,344 DEBUG [M:0;b8365d49b74c:34251 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T18:19:11,344 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-01T18:19:11,344 DEBUG [M:0;b8365d49b74c:34251 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T18:19:11,344 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733077149921 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.large.0-1733077149921,5,FailOnTimeoutGroup] 2024-12-01T18:19:11,344 DEBUG [master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733077149921 {}] cleaner.HFileCleaner(306): Exit Thread[master/b8365d49b74c:0:becomeActiveMaster-HFileCleaner.small.0-1733077149921,5,FailOnTimeoutGroup] 2024-12-01T18:19:11,344 INFO [M:0;b8365d49b74c:34251 {}] hbase.ChoreService(370): Chore service for: master/b8365d49b74c:0 had [] on shutdown 2024-12-01T18:19:11,344 DEBUG [M:0;b8365d49b74c:34251 {}] master.HMaster(1733): Stopping service threads 2024-12-01T18:19:11,344 INFO [M:0;b8365d49b74c:34251 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T18:19:11,344 INFO [M:0;b8365d49b74c:34251 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T18:19:11,345 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-01T18:19:11,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T18:19:11,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:19:11,346 DEBUG [M:0;b8365d49b74c:34251 {}] zookeeper.ZKUtil(347): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T18:19:11,346 WARN [M:0;b8365d49b74c:34251 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T18:19:11,346 INFO [M:0;b8365d49b74c:34251 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-01T18:19:11,346 INFO [M:0;b8365d49b74c:34251 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T18:19:11,346 DEBUG [M:0;b8365d49b74c:34251 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:19:11,346 INFO [M:0;b8365d49b74c:34251 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:19:11,346 DEBUG [M:0;b8365d49b74c:34251 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:19:11,346 DEBUG [M:0;b8365d49b74c:34251 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:19:11,346 DEBUG [M:0;b8365d49b74c:34251 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T18:19:11,346 INFO [M:0;b8365d49b74c:34251 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=25.32 KB heapSize=32.31 KB 2024-12-01T18:19:11,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:19:11,362 DEBUG [M:0;b8365d49b74c:34251 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d00909b9d8da4c1986a0bada35943665 is 82, key is hbase:meta,,1/info:regioninfo/1733077150535/Put/seqid=0 2024-12-01T18:19:11,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741842_1018 (size=5672) 2024-12-01T18:19:11,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741842_1018 (size=5672) 2024-12-01T18:19:11,367 INFO [M:0;b8365d49b74c:34251 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d00909b9d8da4c1986a0bada35943665 2024-12-01T18:19:11,385 DEBUG [M:0;b8365d49b74c:34251 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6d7f826f235e46d3b2a9a40840f8bbfe is 696, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733077150908/Put/seqid=0 2024-12-01T18:19:11,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741843_1019 (size=6626) 2024-12-01T18:19:11,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741843_1019 (size=6626) 2024-12-01T18:19:11,391 INFO [M:0;b8365d49b74c:34251 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.72 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6d7f826f235e46d3b2a9a40840f8bbfe 2024-12-01T18:19:11,409 DEBUG [M:0;b8365d49b74c:34251 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/679de46255ae491fbf4215078d03224d is 69, key is b8365d49b74c,43869,1733077149759/rs:state/1733077150009/Put/seqid=0 2024-12-01T18:19:11,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741844_1020 (size=5156) 2024-12-01T18:19:11,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741844_1020 (size=5156) 2024-12-01T18:19:11,420 INFO [M:0;b8365d49b74c:34251 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=70 (bloomFilter=true), 
to=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/679de46255ae491fbf4215078d03224d 2024-12-01T18:19:11,439 DEBUG [M:0;b8365d49b74c:34251 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6f574fcb0b53440bb029a387a900b5fe is 52, key is load_balancer_on/state:d/1733077151095/Put/seqid=0 2024-12-01T18:19:11,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:19:11,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43869-0x1004ece60df0001, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:19:11,443 INFO [RS:0;b8365d49b74c:43869 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,43869,1733077149759; zookeeper connection closed. 2024-12-01T18:19:11,443 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@67aa7e8e {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@67aa7e8e 2024-12-01T18:19:11,443 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-01T18:19:11,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741845_1021 (size=5056) 2024-12-01T18:19:11,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741845_1021 (size=5056) 2024-12-01T18:19:11,445 INFO [M:0;b8365d49b74c:34251 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6f574fcb0b53440bb029a387a900b5fe 2024-12-01T18:19:11,450 DEBUG [M:0;b8365d49b74c:34251 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d00909b9d8da4c1986a0bada35943665 as hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d00909b9d8da4c1986a0bada35943665 2024-12-01T18:19:11,455 INFO [M:0;b8365d49b74c:34251 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d00909b9d8da4c1986a0bada35943665, entries=8, sequenceid=70, filesize=5.5 K 2024-12-01T18:19:11,456 DEBUG [M:0;b8365d49b74c:34251 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6d7f826f235e46d3b2a9a40840f8bbfe as hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6d7f826f235e46d3b2a9a40840f8bbfe 
2024-12-01T18:19:11,460 INFO [M:0;b8365d49b74c:34251 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6d7f826f235e46d3b2a9a40840f8bbfe, entries=8, sequenceid=70, filesize=6.5 K
2024-12-01T18:19:11,461 DEBUG [M:0;b8365d49b74c:34251 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/679de46255ae491fbf4215078d03224d as hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/679de46255ae491fbf4215078d03224d
2024-12-01T18:19:11,465 INFO [M:0;b8365d49b74c:34251 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/679de46255ae491fbf4215078d03224d, entries=1, sequenceid=70, filesize=5.0 K
2024-12-01T18:19:11,466 DEBUG [M:0;b8365d49b74c:34251 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6f574fcb0b53440bb029a387a900b5fe as hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6f574fcb0b53440bb029a387a900b5fe
2024-12-01T18:19:11,470 INFO [M:0;b8365d49b74c:34251 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40897/user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6f574fcb0b53440bb029a387a900b5fe, entries=1, sequenceid=70, filesize=4.9 K
2024-12-01T18:19:11,471 INFO [M:0;b8365d49b74c:34251 {}] regionserver.HRegion(3040): Finished flush of dataSize ~25.32 KB/25929, heapSize ~32.25 KB/33024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=70, compaction requested=false
2024-12-01T18:19:11,473 INFO [M:0;b8365d49b74c:34251 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-01T18:19:11,473 DEBUG [M:0;b8365d49b74c:34251 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-01T18:19:11,473 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/31306ad1-9221-f391-c771-e69d0a8e085d/MasterData/WALs/b8365d49b74c,34251,1733077149704
2024-12-01T18:19:11,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41897 is added to blk_1073741830_1006 (size=31030)
2024-12-01T18:19:11,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46275 is added to blk_1073741830_1006 (size=31030)
2024-12-01T18:19:11,476 INFO [M:0;b8365d49b74c:34251 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-01T18:19:11,476 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
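The "Committing .tmp/... as ..." and "Added ..." entries above show the flush's write-then-commit pattern: each new HFile is first written under the store's .tmp directory and then moved into the store directory before it becomes visible. The following is only a minimal sketch of that general pattern using the public Hadoop FileSystem API with hypothetical paths; it is not the HRegionFileSystem implementation that produced these log lines.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);            // whatever fs.defaultFS points at
        Path tmp = new Path("/store/.tmp/newfile");      // hypothetical temporary location
        Path dst = new Path("/store/info/newfile");      // hypothetical final location
        try (FSDataOutputStream out = fs.create(tmp)) {  // write the new file under .tmp first
          out.writeUTF("cell data");
        }
        if (!fs.rename(tmp, dst)) {                      // "commit" by renaming into the store dir
          throw new java.io.IOException("commit failed for " + dst);
        }
      }
    }
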
2024-12-01T18:19:11,476 INFO [M:0;b8365d49b74c:34251 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34251
2024-12-01T18:19:11,477 DEBUG [M:0;b8365d49b74c:34251 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b8365d49b74c,34251,1733077149704 already deleted, retry=false
2024-12-01T18:19:11,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-01T18:19:11,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34251-0x1004ece60df0000, quorum=127.0.0.1:60348, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-01T18:19:11,579 INFO [M:0;b8365d49b74c:34251 {}] regionserver.HRegionServer(1307): Exiting; stopping=b8365d49b74c,34251,1733077149704; zookeeper connection closed.
2024-12-01T18:19:11,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4cf2aa1e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T18:19:11,582 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@440c487f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T18:19:11,582 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T18:19:11,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70952693{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T18:19:11,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f12d55f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/hadoop.log.dir/,STOPPED}
2024-12-01T18:19:11,584 WARN [BP-1075919058-172.17.0.2-1733077148986 heartbeating to localhost/127.0.0.1:40897 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T18:19:11,584 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T18:19:11,584 WARN [BP-1075919058-172.17.0.2-1733077148986 heartbeating to localhost/127.0.0.1:40897 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1075919058-172.17.0.2-1733077148986 (Datanode Uuid b9245653-5c4c-48e2-8687-1f21a7668664) service to localhost/127.0.0.1:40897
2024-12-01T18:19:11,584 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T18:19:11,585 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee/dfs/data/data3/current/BP-1075919058-172.17.0.2-1733077148986 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:19:11,585 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee/dfs/data/data4/current/BP-1075919058-172.17.0.2-1733077148986 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:19:11,585 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T18:19:11,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@84ee776{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T18:19:11,587 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a055ed1{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T18:19:11,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T18:19:11,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f8c3143{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T18:19:11,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e5c24a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/hadoop.log.dir/,STOPPED}
2024-12-01T18:19:11,588 WARN [BP-1075919058-172.17.0.2-1733077148986 heartbeating to localhost/127.0.0.1:40897 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T18:19:11,588 WARN [BP-1075919058-172.17.0.2-1733077148986 heartbeating to localhost/127.0.0.1:40897 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1075919058-172.17.0.2-1733077148986 (Datanode Uuid 3dc3bdb3-cf47-49b0-a819-129739755a88) service to localhost/127.0.0.1:40897
2024-12-01T18:19:11,588 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T18:19:11,588 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T18:19:11,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee/dfs/data/data1/current/BP-1075919058-172.17.0.2-1733077148986 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:19:11,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/cluster_8fd8916a-a496-31be-98e8-12e9b454bdee/dfs/data/data2/current/BP-1075919058-172.17.0.2-1733077148986 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:19:11,589 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T18:19:11,597 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b34cff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-01T18:19:11,597 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20acf7e0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T18:19:11,598 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T18:19:11,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75dc32b9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T18:19:11,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cd921e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7dbac08b-d41c-3ce2-ecb3-eb102f0a66ff/hadoop.log.dir/,STOPPED}
2024-12-01T18:19:11,605 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-01T18:19:11,620 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-01T18:19:11,629 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=146 (was 124) - Thread LEAK? -, OpenFileDescriptor=517 (was 490) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=127 (was 127), ProcessCount=11 (was 11), AvailableMemoryMB=2834 (was 2844)
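The shutdown sequence above (JVMClusterUtil, MiniZooKeeperCluster, and finally "Minicluster is down" from hbase.HBaseTestingUtility) is the normal teardown of an HBase minicluster test. The following is only a minimal sketch of the usual lifecycle, assuming the standard HBaseTestingUtility API from HBase's test scope; it is not the actual TestLogRolling source, and the class name and test body are placeholders.

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterLifecycleSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(1);          // brings up MiniZK, MiniDFS, one master and one regionserver
        try {
          // ... exercise the cluster (e.g. write data, roll WALs) ...
        } finally {
          util.shutdownMiniCluster();      // tears everything down; HBaseTestingUtility logs "Minicluster is down"
        }
      }
    }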